diff --git a/.gitattributes b/.gitattributes index df8dbb5445b4d99f6f62b5ba6d1dce666bbe481a..2ae48f9c398030a6d8377bab68b85c6e251b3863 100644 --- a/.gitattributes +++ b/.gitattributes @@ -9199,3 +9199,71 @@ ONFJT4oBgHgl3EQfHix_/content/2301.11452v1.pdf filter=lfs diff=lfs merge=lfs -tex K9E1T4oBgHgl3EQfswUE/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text zdAyT4oBgHgl3EQfO_bl/content/2301.00018v1.pdf filter=lfs diff=lfs merge=lfs -text ytFQT4oBgHgl3EQfBjWV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +LtE0T4oBgHgl3EQfiwHh/content/2301.02451v1.pdf filter=lfs diff=lfs merge=lfs -text +zNAyT4oBgHgl3EQfn_iY/content/2301.00499v1.pdf filter=lfs diff=lfs merge=lfs -text +i9FAT4oBgHgl3EQfaB2u/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +u9A0T4oBgHgl3EQfL_8x/content/2301.02125v1.pdf filter=lfs diff=lfs merge=lfs -text +5dE4T4oBgHgl3EQf1Q2P/content/2301.05289v1.pdf filter=lfs diff=lfs merge=lfs -text +GdA0T4oBgHgl3EQfBf_u/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +OtE0T4oBgHgl3EQf0wJ3/content/2301.02690v1.pdf filter=lfs diff=lfs merge=lfs -text +UNE5T4oBgHgl3EQfbQ8x/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +BdE1T4oBgHgl3EQfVgTd/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +adE4T4oBgHgl3EQfoA0n/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +mdFST4oBgHgl3EQfKTg2/content/2301.13736v1.pdf filter=lfs diff=lfs merge=lfs -text +29AzT4oBgHgl3EQfDvqc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +K9E1T4oBgHgl3EQfswUE/content/2301.03368v1.pdf filter=lfs diff=lfs merge=lfs -text +0tFAT4oBgHgl3EQfCRy0/content/2301.08409v1.pdf filter=lfs diff=lfs merge=lfs -text +sNFIT4oBgHgl3EQfyiuq/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +MtE4T4oBgHgl3EQf8w72/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +oNE3T4oBgHgl3EQfLAnj/content/2301.04360v1.pdf filter=lfs diff=lfs merge=lfs -text +5dAyT4oBgHgl3EQfpfgA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +0tFAT4oBgHgl3EQfCRy0/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +39AyT4oBgHgl3EQfcPct/content/2301.00277v1.pdf filter=lfs diff=lfs merge=lfs -text +69E1T4oBgHgl3EQf7AXC/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +GNAyT4oBgHgl3EQfrfkX/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +69E1T4oBgHgl3EQf7AXC/content/2301.03530v1.pdf filter=lfs diff=lfs merge=lfs -text +GdAyT4oBgHgl3EQfrflY/content/2301.00561v1.pdf filter=lfs diff=lfs merge=lfs -text +nNAzT4oBgHgl3EQfqP3M/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +zNAyT4oBgHgl3EQfn_iY/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +stFAT4oBgHgl3EQfgh1S/content/2301.08588v1.pdf filter=lfs diff=lfs merge=lfs -text +GdAyT4oBgHgl3EQfrflY/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +u9A0T4oBgHgl3EQfL_8x/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +B9AyT4oBgHgl3EQf4PrK/content/2301.00784v1.pdf filter=lfs diff=lfs merge=lfs -text +3dFLT4oBgHgl3EQfry9C/content/2301.12145v1.pdf filter=lfs diff=lfs merge=lfs -text +BdFQT4oBgHgl3EQf9zeq/content/2301.13452v1.pdf filter=lfs diff=lfs merge=lfs -text +gdE0T4oBgHgl3EQf6QJf/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +_NFQT4oBgHgl3EQf7jbZ/content/2301.13443v1.pdf filter=lfs diff=lfs merge=lfs -text +5NE0T4oBgHgl3EQfegCm/content/2301.02392v1.pdf filter=lfs diff=lfs merge=lfs -text +BdFQT4oBgHgl3EQf9zeq/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text 
+3tFAT4oBgHgl3EQfERy2/content/2301.08421v1.pdf filter=lfs diff=lfs merge=lfs -text +TtE0T4oBgHgl3EQfVAA3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +CdE4T4oBgHgl3EQfeQ2g/content/2301.05098v1.pdf filter=lfs diff=lfs merge=lfs -text +qNE0T4oBgHgl3EQfqwHK/content/2301.02558v1.pdf filter=lfs diff=lfs merge=lfs -text +ztAyT4oBgHgl3EQfn_j3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +OtE4T4oBgHgl3EQfkA1f/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +nNAzT4oBgHgl3EQfqP3M/content/2301.01627v1.pdf filter=lfs diff=lfs merge=lfs -text +39FAT4oBgHgl3EQfEhxj/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +XNE0T4oBgHgl3EQfmQGx/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +YtAyT4oBgHgl3EQf9fqe/content/2301.00876v1.pdf filter=lfs diff=lfs merge=lfs -text +FdAyT4oBgHgl3EQfrPl7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +B9AyT4oBgHgl3EQf4PrK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +99FLT4oBgHgl3EQfCS7y/content/2301.11975v1.pdf filter=lfs diff=lfs merge=lfs -text +8NE3T4oBgHgl3EQfqQrl/content/2301.04651v1.pdf filter=lfs diff=lfs merge=lfs -text +qNE0T4oBgHgl3EQfqwHK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +3dFLT4oBgHgl3EQfry9C/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +OtE0T4oBgHgl3EQf0wJ3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +zdAyT4oBgHgl3EQfO_bl/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +5dAyT4oBgHgl3EQfpfgA/content/2301.00524v1.pdf filter=lfs diff=lfs merge=lfs -text +MtE4T4oBgHgl3EQf8w72/content/2301.05351v1.pdf filter=lfs diff=lfs merge=lfs -text +xNFQT4oBgHgl3EQfADUm/content/2301.13221v1.pdf filter=lfs diff=lfs merge=lfs -text +2NE2T4oBgHgl3EQfNgYt/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +VtE3T4oBgHgl3EQf0gtr/content/2301.04738v1.pdf filter=lfs diff=lfs merge=lfs -text +t9AyT4oBgHgl3EQfaPc2/content/2301.00237v1.pdf filter=lfs diff=lfs merge=lfs -text +iNAzT4oBgHgl3EQfM_sr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +_dE1T4oBgHgl3EQfogTY/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +0tE2T4oBgHgl3EQf4wij/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +5dE4T4oBgHgl3EQf1Q2P/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +gdE0T4oBgHgl3EQf6QJf/content/2301.02761v1.pdf filter=lfs diff=lfs merge=lfs -text +VtE3T4oBgHgl3EQf0gtr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +etE0T4oBgHgl3EQf5wIr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ktAyT4oBgHgl3EQf_fpg/content/2301.00909v1.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/0tE2T4oBgHgl3EQf4wij/vector_store/index.faiss b/0tE2T4oBgHgl3EQf4wij/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..771614547a26ff9f423128a70bd97db47433add8 --- /dev/null +++ b/0tE2T4oBgHgl3EQf4wij/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0efb3f35d8b73e82bdec5193a7eb1d4809cfb236e15aaba49a1089970017839 +size 6094893 diff --git a/0tFAT4oBgHgl3EQfCRy0/content/2301.08409v1.pdf b/0tFAT4oBgHgl3EQfCRy0/content/2301.08409v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..eba66579d42f028e879230c7d01057e5fd3d5e8d --- /dev/null +++ b/0tFAT4oBgHgl3EQfCRy0/content/2301.08409v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:512fc59ec5567758d031b4109f8a8aff0117bea332c47893f62270694226624c +size 2451766 diff --git 
a/0tFAT4oBgHgl3EQfCRy0/vector_store/index.faiss b/0tFAT4oBgHgl3EQfCRy0/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..f549e7bc6abc80d793ca2a66be1ed512c1b23371
--- /dev/null
+++ b/0tFAT4oBgHgl3EQfCRy0/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4a8b69bc21ce4f85915bce44d737f26bb22e93cb537a2de591ebb19de141640
+size 10879021
diff --git a/0tFIT4oBgHgl3EQf3Cv8/vector_store/index.pkl b/0tFIT4oBgHgl3EQf3Cv8/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..f35ad7adaacc07fd49e7f03be1a448cf5eeadc68
--- /dev/null
+++ b/0tFIT4oBgHgl3EQf3Cv8/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d247ba1b03845d8814ec23608eef3afddc75707834abf9685bf5432d2ae62688
+size 151912
diff --git a/29AzT4oBgHgl3EQfDvqc/vector_store/index.faiss b/29AzT4oBgHgl3EQfDvqc/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..e079cda4ba919cba35a77d640f7b43af81532b7e
--- /dev/null
+++ b/29AzT4oBgHgl3EQfDvqc/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0399ee2a7214c7bb50c7c3cd8d27f67abd774801860d0aad8f7a8c186bdd60f
+size 4194349
diff --git a/29FRT4oBgHgl3EQfnzcq/content/tmp_files/2301.13606v1.pdf.txt b/29FRT4oBgHgl3EQfnzcq/content/tmp_files/2301.13606v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..79593c8d00fe5c880255330d9c251c2a918515b8
--- /dev/null
+++ b/29FRT4oBgHgl3EQfnzcq/content/tmp_files/2301.13606v1.pdf.txt
@@ -0,0 +1,1307 @@

Multi-video Moment Ranking with Multimodal Clue

Danyang Hou 1,2, Liang Pang 1, Yanyan Lan 4, Huawei Shen 1,3, Xueqi Cheng 2,3
1 Data Intelligence System Research Center, Institute of Computing Technology, CAS, Beijing, China
2 CAS Key Lab of Network Data Science and Technology, Institute of Computing Technology, CAS, Beijing, China
3 University of Chinese Academy of Sciences, Beijing, China
4 Institute for AI Industry Research, Tsinghua University, Beijing, China

arXiv:2301.13606v1 [cs.CV] 29 Jan 2023

Abstract

Video corpus moment retrieval (VCMR) is the task of retrieving a relevant video moment from a large corpus of untrimmed videos via a natural language query. State-of-the-art work for VCMR is based on the two-stage method. In this paper, we focus on improving two problems of the two-stage method: (1) Moment prediction bias: the predicted moments for most queries come from the top retrieved videos, ignoring the possibility that the target moment is in the bottom retrieved videos, which is caused by the inconsistency of Shared Normalization between training and inference. (2) Latent key content: different modalities of a video carry different key information for moment localization. To this end, we propose a two-stage model, MultI-video raNking with mUlTimodal cluE (MINUTE). MINUTE uses Shared Normalization during both training and inference to rank candidate moments from multiple videos, which solves moment prediction bias and makes predicting the target moment more effective. In addition, the Multimodal Clue Mining (MCM) component of MINUTE can discover the key content of different modalities in a video to localize moments more accurately. MINUTE outperforms the baselines on the TVR and DiDeMo datasets, achieving a new state of the art for VCMR. Our code will be available at GitHub.

1. Introduction

The rise of video-sharing applications has led to a dramatic increase in the number of videos on the Internet.
Faced with such a huge video corpus, users need an accurate retrieval tool to meet the need for fine-grained cross-modal information. We have the opportunity to address this challenge thanks to the recently proposed video corpus moment retrieval (VCMR) [9, 16] task, which requires retrieving a video moment from a collection of untrimmed videos via a natural language query, where the moment is a temporal segment of a video.

[Figure 1: line plot of video retrieval accuracy and moment prediction accuracy against the number of retrieved videos (1 to 10).]
Figure 1. Moment prediction bias: video retrieval accuracy improves as the number of retrieved videos increases, indicating that the probability of predicting the correct moment also increases. However, when the number of retrieved videos exceeds 2, moment prediction accuracy hardly increases, which means that the predicted moments for most queries come from the top 2 videos.

VCMR consists of two sub-tasks: video retrieval (VR) and single video moment retrieval (SVMR). The goal of VR is to retrieve videos that may contain the target moment via a natural language query, and SVMR aims to use the query to localize the target moment in the retrieved videos.

According to the different strategies for learning the two sub-tasks, existing methods can be divided into one-stage and two-stage methods. One-stage methods [16, 18, 31, 32] treat VCMR as a multi-task learning problem, using a shared backbone with two different heads to learn VR and SVMR. Two-stage methods [15] instead leverage a pipeline of two independent modules to learn the two sub-tasks. Specifically, they first train a video retriever on query-video pairs to learn VR, then take advantage of the Shared Normalization (Shared-Norm) [7] technique to train a localizer to learn SVMR, where the negatives for Shared-Norm are sampled from the training data by the trained retriever. In inference, the retriever first selects the most relevant K videos from the corpus, and the localizer then localizes candidate moments in the K videos. The final predicted moment depends on both the retrieval score and the localization score. The two-stage method is more suitable for VCMR because (1) Shared-Norm enhances the possibility of the target moment appearing in the correct video, and (2) the two-stage method can select models with different query-video interaction modes for the two modules; for example, it can select a late-fusion model as the retriever for fast video retrieval and leverage an early-fusion model as the localizer for accurate moment localization. The state-of-the-art model [15] for VCMR is also based on the two-stage method.

[Figure 2: three frames from the TV show House with their subtitles ("Is it a coincidence that your sister has great hair," / "or that these two have green eyes?" / "You're not saying... They're not brother and sister.") and the query "House shows a picture of the patient to his team and they have concluded that maybe the two are not related by blood."]
Figure 2. Latent key content: the images with a red border are visual key content because they are relevant to "House shows a picture of the patient to his team" in the query. The highlighted subtitle is textual key content, for it relates to "they have concluded that maybe the two are not related by blood".

However, two problems limit the performance of the two-stage method. The first is moment prediction bias: as shown in Fig. 1, the final predicted moments for most queries come from the top-ranked videos among the K retrieved videos. This is counter-intuitive, because the more videos are retrieved, the more likely those videos are to contain the correct moment. This bias neglects the possibility that the target moment is in the bottom-ranked videos. The reason for the bias is that although the two-stage method uses Shared-Norm during training to normalize the probability of the correct moment across the correct video and the negative videos, it still normalizes the probabilities of candidate moments only within each single video during inference. This inconsistency between training and inference makes the localization scores of candidate moments from different videos incomparable at inference time. Since the final predicted moment depends on both the video retrieval score and the moment localization score, the incomparable localization scores make the final moment depend mainly on the video retrieval scores, so the final predicted moment tends to come from videos with higher rankings. The second problem is latent key content: the localizer of the two-stage method neglects key content from different modalities during moment localization. A video is usually composed of multimodal information, such as images (vision) and subtitles (text). As shown in Fig. 2, visual information and textual information have different emphases; if we can identify the important visual and textual information and use it as clues, it will help localize moments better.

In this paper, we propose MultI-video raNking with mUlTimodal cluE (MINUTE) to address these two problems of the two-stage method. For the first problem, we keep Shared-Norm consistent between training and inference, which forces the localization scores of candidate moments among the multiple videos retrieved by the retriever to be comparable during inference. On this basis, we derive a new scoring function to rank the candidate moments, which combines the video retrieval and moment localization scores more effectively. For the second problem, we propose an early-fusion localizer with a Multimodal Clue Mining (MCM) component that discovers key content from different modalities to help moment localization. Specifically, MCM first uses the query to measure the importance of all images and subtitles in the video, then assigns weights to these elements according to their importance. The elements with high importance can be seen as key clues that improve moment localization. We then feed the weighted video representation together with the query representation to a multimodal Transformer that captures deeper interactions between video and query to predict moments.

We conduct extensive experiments on the TVR and DiDeMo datasets. The experimental results show that our proposed MINUTE outperforms the other baselines, achieving a new state-of-the-art result. Ablation experiments verify that our method mitigates the two problems of the two-stage method.

2. Related Work

We first briefly introduce works related to the two sub-tasks of VCMR. After that, we introduce recent works on VCMR in detail.

Text-video retrieval is a cross-modal retrieval task whose goal is to retrieve relevant videos from a corpus through a natural language query. This task is similar to the VR sub-task of VCMR, but most of the content of a video in the former is relevant to the query, while only a small part of the content of a video in the latter is relevant to the query.
The works on text-video retrieval can be divided into two categories depending on the interaction mode between query and video, i.e., late fusion and early fusion. Late-fusion methods [8, 21, 27] use two separate encoders to embed queries and videos into a shared semantic space. These models can be very efficient if each modality's representation is calculated and indexed offline, since only the similarity between video and query has to be computed at inference. Early-fusion methods [6, 12, 25] perform fine-grained interactions between video and query with an attention mechanism [2, 24] to improve retrieval accuracy.

Temporal language grounding is a task similar to SVMR, which requires localizing a moment in a video given a natural language query. Temporal language grounding can be seen as a special case of VCMR in which the corpus contains only one video for each query. According to the way the moment is predicted, existing works on temporal language grounding can be divided into proposal-based and proposal-free methods. Proposal-based methods [3, 5, 13, 19, 26, 34] first generate several proposals as candidates, then rank the proposals according to their matching degree with the query, and regard the proposal with the highest matching degree as the answer. Unlike proposal-based methods, proposal-free methods [4, 17, 29, 30, 33] directly predict the start and end times of the moment without pre-extracting proposals as candidates.

Video corpus moment retrieval was first proposed by [9]; [16] then proposed a new dataset, TVR, for VCMR, which extends the uni-modal videos (images) of the previous dataset to multiple modalities (images and subtitles). Existing works on VCMR can be divided into two categories depending on how they learn the two sub-tasks, i.e., one-stage methods [16, 18, 31, 32] and two-stage methods [15]. The one-stage method treats VCMR as a multi-task learning problem, using a shared model with two different heads to learn VR and SVMR simultaneously. XML [16] is the first one-stage method for VCMR; it uses a late-fusion model to encode video and query separately and then uses two different heads to learn the two tasks. ReLoCLNet [32] leverages contrastive learning to enhance the performance of XML. [18] also follows XML and proposes a video-language pre-training model, HERO, which significantly improves the performance. HAMMER [31] is an early-fusion one-stage model that uses attention to make deep interactions between query and video for more accurate moment retrieval. The two-stage method leverages two different modules to learn the two sub-tasks. CONQUER [15] is the only two-stage method; it uses the video retrieval head of HERO [18] as the retriever and proposes a model based on context-query attention (CQA) [28] as the localizer. CONQUER achieves state-of-the-art results on VCMR. In training, CONQUER uses the Shared-Norm [7] technique to train the localizer. In inference, CONQUER first uses the video retriever to retrieve the top-K videos, then uses the moment localizer to localize the moment in the retrieved videos. The two-stage method is more suitable for VCMR, but it suffers from moment prediction bias and latent key content. In this paper, we focus on improving these two problems.

3. Background

We first formulate VCMR, then describe the two-stage method, followed by an analysis of moment prediction bias.

3.1. Task Formulation

We denote a corpus of videos V = {v_1, v_2, ..., v_{|V|}}, where |V| is the number of videos in the corpus and v_i = {f_i^1, f_i^2, ..., f_i^{|v_i|}} is the i-th video, which contains |v_i| frames. Each frame f_i^j consists of an image and a subtitle (I_i^j, s_i^j). Note that if a frame contains no subtitle, s_i^j is set to empty. Given a natural language query q = {w_1, w_2, ..., w_{|q|}}, which consists of a sequence of words, the goal of VCMR is to retrieve the most relevant moment m^* from V. The target moment m^* is a temporal segment (\tau_{*,st}, \tau_{*,ed}) in video v^*, where v^* denotes the video that contains the target moment whose start and end timestamps are \tau_{*,st} and \tau_{*,ed}, respectively.

The goal of VCMR can be seen as maximizing the probability of the target moment m^* given the query q and the video corpus V:

    m^* = \arg\max_m P(m \mid q, V).    (1)

According to the chain rule of conditional probability,

    P(m^* \mid q, V) = P(m^* \mid v^*, q) \cdot P(v^* \mid q, V),    (2)

where P(v^* \mid q, V) and P(m^* \mid v^*, q) are the probabilities of retrieving video v^* from the corpus V and of localizing the target moment m^* in the retrieved video, respectively. The probability of the target moment depends on the probabilities of its start and end timestamps:

    P(m^* \mid v^*, q) = P_{st}(\tau_{*,st} \mid v^*, q) \cdot P_{ed}(\tau_{*,ed} \mid v^*, q).    (3)
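To make the notation concrete, the following minimal Python sketch (our own illustration, with names that are not from the paper's code) mirrors the formulation: a corpus of videos, each video a sequence of frames pairing an image with an optional subtitle, and a moment given by start and end timestamps.

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Frame:
    image_path: str           # visual content I_i^j
    subtitle: Optional[str]   # textual content s_i^j; None or empty if no subtitle

@dataclass
class Video:
    video_id: str
    frames: List[Frame]       # v_i = {f_i^1, ..., f_i^{|v_i|}}

@dataclass
class Moment:
    video_id: str
    start: float              # tau_{*,st} in seconds
    end: float                # tau_{*,ed} in seconds

# A query is a natural-language string; VCMR maps (query, corpus) to the
# Moment that maximizes P(m | q, V) as in Eq. (1).
corpus: List[Video] = []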
3.2. Two-stage Method

The two-stage method uses a video retriever to model P(v^* | q, V) and a moment localizer to model P(m^* | v^*, q). In training, the two-stage method uses a margin-based loss [10] to train the video retriever, then uses Shared-Norm to train the moment localizer. Specifically, for a query there is a positive video v_+ whose moment (\tau_{+,j}, \tau_{+,k}) is the ground truth, and n negative videos {v_1^-, v_2^-, ..., v_n^-} that do not contain the target moment. Shared-Norm is leveraged to normalize the probabilities of \tau_{+,j} being the start time and \tau_{+,k} being the end time across all frames of the positive video and the negatives, e.g.,

    P_{st}(\tau_{+,j} \mid v_+, q) = \frac{\exp(l^{st}_{+,j})}{\sum_{a=1}^{n+1} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})},    (4)

where l^{st}_{a,b} is the logit that the b-th frame of video v_a is the start timestamp of the ground-truth moment, and |v_a| is the number of frames in video v_a. Training with Shared-Norm enhances the possibility of the target moment existing in the correct video.

In inference, the retriever first uses the query to retrieve the top-K videos from the corpus, then the localizer localizes the target moment in the retrieved videos. The score of a predicted moment (\tau_{i,j}, \tau_{i,k}) in video v_i with start time j and end time k depends on both the retrieval score and the localization score; the scoring function is

    S_{i,jk} = \exp(\alpha \cdot S^R_i) \cdot S^L_{i,jk},    (5)

where S_{i,jk} is the final score of the predicted moment, S^R_i is the retrieval score of video v_i, S^L_{i,jk} is the localization score of the moment within the video, and \alpha is a hyper-parameter that encourages the target moment to come from the top retrieved videos. The retrieval score is computed by the cosine similarity between the query representation and the video representation, and the localization score is computed from the probability of a moment within a single video:

    S^L_{i,jk} = P_{st}(\tau_{i,j} \mid v_i, q) \cdot P_{ed}(\tau_{i,k} \mid v_i, q),    (6)

where P_{st}(\tau_{i,j} \mid v_i, q), and likewise P_{ed}(\tau_{i,k} \mid v_i, q), is normalized across a single video:

    P_{st}(\tau_{i,j} \mid v_i, q) = \frac{\exp(l^{st}_{i,j})}{\sum_{b=1}^{|v_i|} \exp(l^{st}_{i,b})}.    (7)
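To make the difference between Eq. (4) and Eq. (7) concrete, the following PyTorch sketch (our own illustration, not the authors' code) normalizes start-time logits either jointly across the positive video and the sampled negatives, or within a single video only.

import torch
import torch.nn.functional as F

def shared_norm_start_logprob(logits_per_video, pos_video_idx, pos_frame_idx):
    """Eq. (4): softmax over the frames of the positive video AND the negative
    videos, so the correct frame competes with frames from other videos.

    logits_per_video: list of 1-D tensors, one tensor of start logits per video.
    """
    all_logits = torch.cat(logits_per_video)        # flatten frames of all videos
    log_probs = F.log_softmax(all_logits, dim=0)    # shared normalization
    # offset of the positive frame inside the flattened tensor
    offset = sum(t.numel() for t in logits_per_video[:pos_video_idx])
    return log_probs[offset + pos_frame_idx]        # log P_st(tau_{+,j} | ...)

def single_video_start_logprob(video_logits, frame_idx):
    """Eq. (7): softmax only within one video, as used at inference by the
    earlier two-stage method; scores of different videos are not comparable."""
    return F.log_softmax(video_logits, dim=0)[frame_idx]

# toy example: one positive video with 5 frames and two negatives with 4 frames each
logits = [torch.randn(5), torch.randn(4), torch.randn(4)]
loss_shared = -shared_norm_start_logprob(logits, pos_video_idx=0, pos_frame_idx=2)
print(loss_shared, single_video_start_logprob(logits[0], 2))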
3.3. Moment Prediction Bias

As shown in Fig. 1, the final predicted moments of the two-stage method for most queries come from the top-ranked videos. This bias limits the performance of the two-stage method on VCMR, because it neglects the possibility of the target moment existing in the bottom-ranked videos. We conjecture that the bias mainly comes from the inconsistency of normalization between training and inference, shown in Eq. (4) and Eq. (7).

In training, Shared-Norm is used to highlight the significance of the correct moment being in the correct video. Nevertheless, in inference, the probability is normalized within every single video, so the candidate moments predicted from different videos are incomparable and this significance no longer exists. Therefore, the score of the final predicted moment in Eq. (5) depends more on the video retrieval score, making the final predicted moment more likely to come from the top-ranked videos.

4. Method

We first illustrate how we mitigate moment prediction bias. Then we introduce the proposed model MINUTE, with emphasis on the multimodal clue mining component. Finally, we describe the training of MINUTE.

4.1. Multi-video Moment Ranking in Prediction

We propose to adopt Shared-Norm in inference, so that the localization scores of candidate moments from multiple videos are comparable, which enhances the influence of the moment localization score S^L_{i,jk} on the final score S_{i,jk} and thus mitigates moment prediction bias. Furthermore, we derive from Eq. (2) a new scoring function that combines the video retrieval and moment localization scores more effectively.

Specifically, to compute P(v^* | q, V), we obtain the video representation v_i = {f_i^1, f_i^2, ..., f_i^{|v_i|}} and the query representation q. In the following, we use bold notation to denote vectors. The j-th frame representation f_i^j consists of an image representation and a subtitle representation (I_i^j, s_i^j). The query also has two representations (q_I, q_s), used to compute similarity scores with images and subtitles, respectively. The details of the query and video representations are given in Sec. 4.2.1.

Because only part of the content of a video is related to the query, the similarity score S^R_i between the query and a video is the average of the max-pooled query-image scores and the max-pooled query-subtitle scores, where the similarity sim(., .) is the inner product:

    sim(q_c, c_i^j) = q_c^T \cdot c_i^j,  c \in {I, s},
    \phi_c = \max_{1 \le j \le |v_i|} sim(q_c, c_i^j),
    S^R_i = \frac{\phi_I + \phi_s}{2}.    (8)

The probability P(v^* | q, V) is computed by softmax normalization over all query-video scores in the corpus:

    P(v^* \mid q, V) = \frac{\exp(S^R_*)}{\sum_{j=1}^{|V|} \exp(S^R_j)}.    (9)

Computing the inner product between the query and all videos in the corpus is computationally intensive, so we employ Maximum Inner Product Search (MIPS) [22] to find the top-K videos and approximate the probability. The calculation of P(v^* | q, V) in Eq. (9) is approximated by P(v^* | q, V^*):

    P(v^* \mid q, V) \approx P(v^* \mid q, V^*) = \frac{\exp(S^R_*)}{\sum_{j=1}^{K} \exp(S^R_j)}.    (10)

The probabilities of the remaining videos in the corpus are considered close to 0. The retriever is trained to maximize the log-likelihood log P(v^* | q, V), which differs from the previous two-stage method that uses a margin-based loss.
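Since Eq. (10) only needs the top-K inner-product scores, retrieval can be approximated with an off-the-shelf MIPS index. Below is a minimal sketch using FAISS; the dimensionality, the use of a single pooled vector per video, and the exact-search IndexFlatIP are our assumptions for illustration, not details reported by the paper.

import numpy as np
import faiss

d = 768                                                    # assumed embedding size
video_vecs = np.random.rand(10000, d).astype("float32")    # offline-indexed corpus
query_vec = np.random.rand(1, d).astype("float32")         # query representation

index = faiss.IndexFlatIP(d)          # exact maximum inner product search
index.add(video_vecs)

K = 10
scores, video_ids = index.search(query_vec, K)   # S^R over the top-K videos
# Softmax over the K retrieval scores approximates P(v | q, V) as in Eq. (10);
# videos outside the top K are treated as having probability close to 0.
probs = np.exp(scores[0] - scores[0].max())
probs /= probs.sum()
print(video_ids[0], probs)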
As for P(m^* | v^*, q), we use Shared-Norm in inference, consistent with training, to mitigate moment prediction bias:

    P(m^* \mid v^*, q) \approx P(m^* \mid V^*, q) = \frac{\exp(l^{st}_{*,j})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})} \cdot \frac{\exp(l^{ed}_{*,k})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{ed}_{a,b})}.    (11)

A well-trained localizer should suppress the probability of the target moment appearing in the wrong videos to be close to zero, so P(m^* | V^*, q) approximately equals P(m^* | v^*, q). The details of the logits l^{st}_{*,j} are introduced in Sec. 4.2.2.

Combining Eq. (2), Eq. (10) and Eq. (11), the probability P(m^* | q, V) can be computed by

    P(m^* \mid q, V) \approx \frac{\exp(S^R_*)}{\sum_{j=1}^{K} \exp(S^R_j)} \cdot \frac{\exp(l^{st}_{*,j})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})} \cdot \frac{\exp(l^{ed}_{*,k})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{ed}_{a,b})},    (12)

where the denominators are the same for all candidate moments from the K videos, so the probability can be simplified to a new scoring function:

    S_* = S^R_* + l^{st}_{*,j} + l^{ed}_{*,k},    (13)

where l^{st}_{*,j} + l^{ed}_{*,k} = S^L_{*,jk} represents the moment localization score. This scoring function is simpler than Eq. (5) and has no hyper-parameter \alpha, which may greatly increase the weight of the top-ranked videos' retrieval scores.

In inference, we use the scoring function in Eq. (13) to rank all candidate moments in the multiple retrieved videos.
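The ranking step of Eq. (13) can be written as a few lines of code: every candidate moment from every retrieved video is scored by the sum of its video retrieval score and its start/end logits, and all candidates are sorted in one list. This is a simplified sketch under our own naming; the actual system also restricts moment length and applies NMS (see Sec. 5.3).

import torch

def rank_moments(retrieval_scores, start_logits, end_logits, max_len=24, top_n=100):
    """Rank candidate moments from K retrieved videos with S = S^R + l^st + l^ed (Eq. 13).

    retrieval_scores: tensor of shape (K,)        -- S^R_i for each retrieved video
    start_logits / end_logits: list of K tensors  -- per-frame logits of each video
    """
    candidates = []
    for i, s_r in enumerate(retrieval_scores.tolist()):
        l_st, l_ed = start_logits[i], end_logits[i]
        for j in range(l_st.numel()):
            for k in range(j, min(j + max_len, l_ed.numel())):
                score = s_r + l_st[j].item() + l_ed[k].item()
                candidates.append((score, i, j, k))
    # one global ranking over the moments of all K videos, instead of per-video ranking
    candidates.sort(key=lambda c: c[0], reverse=True)
    return candidates[:top_n]

# toy example with 3 retrieved videos of different lengths
scores = torch.tensor([1.2, 0.7, 0.3])
starts = [torch.randn(30), torch.randn(25), torch.randn(40)]
ends = [torch.randn(30), torch.randn(25), torch.randn(40)]
print(rank_moments(scores, starts, ends)[:3])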
4.2. Model

We propose the two-stage model MINUTE, which consists of a late-fusion video retriever and an early-fusion moment localizer.

4.2.1 Video Retriever

The goal of the video retriever is to select from the corpus V a small subset V^* of videos that may contain the target moment, given the query q. The retriever of the proposed model is a late-fusion model that contains two encoders, a query encoder and a video encoder, as shown in Fig. 3. The late-fusion architecture ensures retrieval efficiency if the representations of the videos are indexed in advance.

Video Encoder. The video encoder encodes the frames of the i-th video into frame representations v_i = {f_i^1, ..., f_i^{|v_i|}}, where the j-th frame f_i^j contains an image representation I_i^j and a subtitle representation s_i^j. We first use RoBERTa [20] to extract sentence features of the subtitles, and use SlowFast [11] and ResNet [14] to extract image features. We then feed the subtitle features and image features to a one-layer multimodal Transformer that simultaneously captures intra-modal and inter-modal dependencies and outputs each image representation I_i^j and subtitle representation s_i^j.

Query Encoder. The query encoder converts the query q = {w_1, w_2, ..., w_{|q|}} into the query representation q. We first use RoBERTa to extract the feature w_j of each word in the query, and a one-layer Transformer captures the contextual representation of each word. We generate two query representations, one for the query-image similarity score and one for the query-subtitle similarity score, denoted q_I and q_s. We adopt a modular pooling mechanism [16] to convert the sequence of representations into the two vectors:

    o_i = W_c w_i,  \alpha_i = \frac{\exp(o_i)}{\sum_{j=1}^{|q|} \exp(o_j)},  q_c = \sum_{i=1}^{|q|} \alpha_i w_i,    (14)

where W_c is a learnable parameter and c \in {I, s}. The modular mechanism can be regarded as a learnable pooling and is also used in previous works [16, 18, 32].

[Figure 3: architecture of the video retriever. The video encoder extracts image features with SlowFast and ResNet and subtitle features with RoBERTa, adds modality ('ME') and positional ('PE') embeddings, and feeds them to a multimodal Transformer, producing image representations I^1, ..., I^{|v|} and subtitle representations s^1, ..., s^{|v|}. The query encoder feeds RoBERTa word features to a Transformer followed by modular pooling, producing q_I and q_s. The example query is "Foreman tells Enid why he had to sedate the patient." with the subtitle "Enid: Did he need a sedative? I did." (00:48:45-00:52:12).]
Figure 3. The video retriever consists of two encoders, a video encoder and a query encoder. 'ME' and 'PE' represent modality embedding and positional embedding, respectively.

We also use the retrieval head of HERO [18] as a retriever for a fair comparison with CONQUER [15]. The original HERO uses a margin-based loss [10] for video retrieval, whose retrieval score only represents the cosine similarity between the query and the videos, so we re-train HERO in the same way as the proposed retriever to model the probability P(v^* | q, V) in Eq. (10). We use simple retriever to denote the proposed retriever and HERO retriever to denote the retriever based on HERO.

4.2.2 Moment Localizer

The moment localizer, shown in Fig. 4, uses the query to localize the target moment m^* in the top-K retrieved videos V^*. The proposed localizer is based on an early-fusion architecture to explore deeper interactions between query and video. Because the retrieved videos are narrowed down to a small range, the amount of computation is acceptable.

The localizer first uses a query encoder to obtain token representations {\bar{w}_1, ..., \bar{w}_{|q|}} and a video encoder to obtain the video representation \bar{v}_i = {\bar{f}_i^1, ..., \bar{f}_i^{|v_i|}}, where \bar{f}_i^j contains an image representation and a subtitle representation (\bar{I}_i^j, \bar{s}_i^j). The video encoder and query encoder in the localizer are the same as those in the retriever but do not share parameters.

[Figure 4: architecture of the moment localizer. The query encoder produces token representations \bar{w} and modular-pooled vectors \bar{q}_I and \bar{q}_s; the video encoder produces image and subtitle representations \bar{I} and \bar{s}, which multimodal clue mining weights and fuses (via an FC layer) into frame representations \hat{f}; a multimodal Transformer followed by two 1D convolutions outputs the start and end logits l^{st} and l^{ed}.]
Figure 4. The moment localizer contains two components, multimodal clue mining and a multimodal Transformer. For brevity, we omit the subscripts of the representations.

Our proposed localizer consists of two components: multimodal clue mining and a multimodal Transformer.

Multimodal Clue Mining (MCM) addresses the latent key content problem by discovering important content from the multiple modalities of a video to help moment localization. MCM first uses the query to measure the importance of each image and subtitle in the video, then assigns weights to these elements from the different modalities according to their importance.

Specifically, we leverage modular pooling to obtain query representations \bar{q}_I and \bar{q}_s that measure image importance and subtitle importance, respectively. The importance is computed by

    p_c^j = (\bar{W}_c \bar{c}^j) \odot \bar{q}_c,  c \in {I, s},    (15)

where \bar{W}_c is a learnable parameter and p_c^j is the importance of the j-th image or subtitle. We then use the importance to weight the image and subtitle representations:

    \hat{c}^j = norm(p_c^j) \odot \bar{c}^j,  c \in {I, s},    (16)

where \hat{c}^j is the weighted image or subtitle representation and norm is L2 normalization, which makes the model converge better.

MCM can be seen as an amplifier that allows the localizer to focus on the important content, which we call clues, from multiple modalities.

We fuse the weighted representations \hat{I}^j and \hat{s}^j of a frame with a fully-connected layer:

    \hat{f}^j = FC([\hat{I}^j; \hat{s}^j]),    (17)

where [;] denotes concatenation and \hat{f}^j is the fused representation of the j-th frame. The fused video representation \hat{v}_i = {\hat{f}_i^1, ..., \hat{f}_i^{|v_i|}} is fed to a multimodal Transformer together with the query token representations.

Multimodal Transformer (MMT). We use a three-layer multimodal Transformer to make deep interactions between the fused video representation and the token representations. In addition, two 1D-convolution layers are leveraged to capture dependencies between adjacent frames and to output the logits l^{st}_{i,j}, l^{ed}_{i,k} of the start and end times of the target moment.
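A minimal PyTorch sketch of the Multimodal Clue Mining step described above (Eqs. (15)-(17)) is given below; tensor shapes and module names are our own assumptions for illustration, not the authors' implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class MultimodalClueMining(nn.Module):
    """Weight image and subtitle representations by query-conditioned importance."""
    def __init__(self, dim):
        super().__init__()
        self.w_img = nn.Linear(dim, dim, bias=False)   # \bar{W}_I in Eq. (15)
        self.w_sub = nn.Linear(dim, dim, bias=False)   # \bar{W}_s in Eq. (15)
        self.fuse = nn.Linear(2 * dim, dim)            # FC in Eq. (17)

    def forward(self, img, sub, q_img, q_sub):
        # img, sub: (num_frames, dim); q_img, q_sub: (dim,) modular-pooled query vectors
        p_img = self.w_img(img) * q_img                # importance of each image, Eq. (15)
        p_sub = self.w_sub(sub) * q_sub                # importance of each subtitle
        img_hat = F.normalize(p_img, dim=-1) * img     # weighted representations, Eq. (16)
        sub_hat = F.normalize(p_sub, dim=-1) * sub
        return self.fuse(torch.cat([img_hat, sub_hat], dim=-1))   # fused frames, Eq. (17)

mcm = MultimodalClueMining(dim=384)
frames = mcm(torch.randn(64, 384), torch.randn(64, 384),
             torch.randn(384), torch.randn(384))
print(frames.shape)   # (64, 384); fed to the multimodal Transformer with the query tokens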
4.3. Training

We first train the retriever on text-video pairs, then use the trained retriever to sample negative videos as hard negatives to train the localizer.

Training the retriever. To maximize the log-likelihood log P(v^* | q, V) in Eq. (9), we adopt the InfoNCE [23] loss with in-batch negative sampling to train the retriever. Specifically, let d = {(v_1, q_1), ..., (v_b, q_b)} denote the training data in a batch, where b is the batch size. Each pair (v_i, q_i) in d has b - 1 negative samples for the query-to-video and video-to-query losses, namely (v_z, q_i)_{z \ne i} and (v_i, q_z)_{z \ne i}:

    L_v = -\log \frac{\exp(S^R_{i,i})}{\sum_{z=1}^{b} \exp(S^R_{z,i})},  L_q = -\log \frac{\exp(S^R_{i,i})}{\sum_{z=1}^{b} \exp(S^R_{i,z})},    (18)

where L_v and L_q are the query-to-video loss and the video-to-query loss, respectively. We use the sum of the two losses to train the retriever.

Training the localizer. We use the well-trained retriever to retrieve top-ranked videos from the training data and sample n videos as hard negatives to train the localizer with the Shared-Norm technique:

    L_{st} = -\log \frac{\exp(l^{st}_{+,j})}{\sum_{a=1}^{n+1} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})},  L_{ed} = -\log \frac{\exp(l^{ed}_{+,k})}{\sum_{a=1}^{n+1} \sum_{b=1}^{|v_a|} \exp(l^{ed}_{a,b})}.    (19)

The sum of L_{st} and L_{ed} is used to train the localizer.

5. Experiment

We first introduce the datasets and metrics, then describe the implementation details. After that, we present the experimental comparison with baselines and ablation studies of the proposed model. Finally, we present a case study.

Table 1. Comparisons of VCMR results (IoU=0.7) with baselines on the TVR validation and testing sets. 'SR' denotes simple retriever, and 'HR' denotes HERO retriever.

                 Validation               Testing
Model            R1     R10    R100       R1     R10    R100
XML              2.62   9.05   22.47      3.32   13.41  30.52
ReLoCLNet        4.15   14.06  32.42      -      -      -
HAMMER           5.13   11.38  16.71      -      -      -
HERO             5.13   16.26  24.55      6.21   19.34  36.66
CONQUER          7.76   22.49  35.17      9.24   28.67  41.98
MINUTE(SR)       8.17   23.38  37.93      9.59   28.96  45.23
MINUTE(HR)       10.70  29.37  45.09      12.60  33.72  50.23

Table 2. Comparisons of VCMR results with baselines on the DiDeMo testing set.

                 IoU=0.5                  IoU=0.7
Model            R1     R5     R10        R1     R5     R10
XML              2.36   -      10.42      1.59   -      6.77
HERO             3.37   8.97   13.26      2.76   7.73   11.78
CONQUER          3.31   9.27   13.99      2.79   8.04   11.90
MINUTE(HR)       3.44   9.62   14.62      2.81   7.89   12.03

5.1. Datasets

TVR [16] is built on TV shows whose videos consist of images and subtitles. TVR contains 17435, 2179, and 1089 videos in the training, validation, and testing sets, respectively. The average length of the videos is 76.2 seconds, while the average length of the moments is 9.1 seconds.

DiDeMo [1] is a dataset whose videos are from the real world, with only images and no subtitles. DiDeMo contains 8395, 1065, and 1004 training, validation, and testing videos, respectively. The average durations of videos and moments are 54 seconds and 6.5 seconds, respectively.

5.2. Evaluation Metrics

We follow [16] for the evaluation metrics. For the VCMR task, the evaluation metric is R@K, IoU=p, which represents the percentage of queries for which at least one of the top-K retrieved moments has an Intersection over Union (IoU) with the ground truth exceeding p. The two sub-tasks are also evaluated. The metric for the SVMR task has the same form as that of the VCMR task, but the evaluation is conducted only within the ground-truth video of each query. As for the VR task, the metric is R@K, which denotes the percentage of queries for which the correct video is among the top-K ranked videos.
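For completeness, the metric described above can be made precise with a short sketch: a prediction counts as a hit if it lies in the correct video and its temporal IoU with the ground truth exceeds the threshold, and R@K, IoU=p is the fraction of queries with at least one hit among the top-K predictions. This is our own illustrative implementation, not the official evaluation script.

def temporal_iou(pred, gt):
    """IoU of two temporal segments given as (start, end) in seconds."""
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = max(pred[1], gt[1]) - min(pred[0], gt[0])
    return inter / union if union > 0 else 0.0

def recall_at_k(predictions, ground_truths, k=1, iou_thresh=0.7):
    """predictions: {query_id: [(video_id, start, end), ...]} ranked lists;
    ground_truths: {query_id: (video_id, start, end)}."""
    hits = 0
    for qid, gt in ground_truths.items():
        for video_id, st, ed in predictions.get(qid, [])[:k]:
            if video_id == gt[0] and temporal_iou((st, ed), (gt[1], gt[2])) >= iou_thresh:
                hits += 1
                break
    return 100.0 * hits / len(ground_truths)

print(temporal_iou((24.0, 27.0), (23.0, 28.0)))   # 3 / 5 = 0.6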
5.3. Implementation Details

Training. We train the simple retriever for 100 epochs with a batch size of 256. For the localizer, we sample 4 and 2 negative videos per query from the top-100 ranked videos on TVR and DiDeMo, respectively, and train it for 10 epochs with a batch size of 32. Both the simple retriever and the localizer are trained with AdamW with a learning rate of 0.0001 and a weight decay of 0.01 on a single 3090 GPU. For the HERO retriever, we retrain it with the InfoNCE loss on 8 3090 GPUs with the same settings as the original HERO [18].

Inference. The localizer localizes the target moment in the top-10 retrieved videos. The length of predicted moments is limited to [1, 24] and [1, 7] for TVR and DiDeMo, respectively. We use non-maximum suppression (NMS) with an IoU of 0.7 to post-process the predicted moments.
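The NMS post-processing mentioned above can be sketched as a standard 1-D temporal NMS (the exact implementation used by the authors may differ):

def temporal_nms(moments, iou_thresh=0.7):
    """moments: list of (score, video_id, start, end).
    Keeps the highest-scoring moment and drops lower-scoring ones from the same
    video that overlap it with IoU above the threshold."""
    def iou(a, b):
        inter = max(0.0, min(a[1], b[1]) - max(a[0], b[0]))
        union = max(a[1], b[1]) - min(a[0], b[0])
        return inter / union if union > 0 else 0.0

    kept = []
    for m in sorted(moments, key=lambda x: x[0], reverse=True):
        _, vid, st, ed = m
        if all(k[1] != vid or iou((st, ed), (k[2], k[3])) < iou_thresh for k in kept):
            kept.append(m)
    return kept

preds = [(0.9, "v1", 10.0, 20.0), (0.8, "v1", 11.0, 19.0), (0.7, "v2", 5.0, 9.0)]
print(temporal_nms(preds))   # the second moment is suppressed by the first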
5.4. Comparison with Baselines

We compare the proposed model on the VCMR task with the baselines, including four one-stage models, XML [16], ReLoCLNet [32], HAMMER [31], and HERO [18], and a two-stage model, CONQUER [15].

Table 3. Comparisons of VR results with baselines on the TVR validation set.

Model            R@1    R@5    R@10   R@100
XML              16.54  38.11  50.41  88.22
ReLoCLNet        22.13  45.85  57.25  90.21
HERO             29.01  52.82  63.07  89.91
SR               23.12  46.86  57.83  90.22
HR               32.88  55.62  65.35  91.26

Table 4. Comparisons of SVMR results with baselines on the TVR validation set.

                 IoU=0.5                  IoU=0.7
Model            R1     R10    R100       R1     R10    R100
XML              31.43  -      -          13.89  -      -
ReLoCLNet        31.88  -      -          15.04  -      -
HERO             32.22  60.08  80.66      15.30  40.84  63.45
CONQUER          43.63  -      -          22.84  -      -
MINUTE(SR)       44.49  78.62  93.57      23.98  61.30  80.13
MINUTE(HR)       44.74  78.90  93.80      24.08  62.10  80.45

TVR. As shown in Tab. 1, the proposed models outperform all baseline methods. Compared with the best previous method, CONQUER, which also uses HERO for the VR task, our proposed model with the HERO retriever achieves a 36% improvement at R@1 on the testing set. We also report the results on the two sub-tasks in Tab. 3 and Tab. 4. For VR, the HERO retriever trained with the InfoNCE loss has better retrieval accuracy than the original HERO. For SVMR, our proposed models also achieve the best results. It is worth noting that the proposed model with the simple retriever outperforms CONQUER on VCMR even though its VR performance (R@1 23.12) is much worse than that of CONQUER (R@1 29.01). This is because moment prediction bias limits the performance of CONQUER.

DiDeMo. We report the VCMR results on the DiDeMo testing set in Tab. 2. The performance of the proposed model is still better than the others. All methods perform worse than on TVR because the DiDeMo dataset was designed for temporal language grounding, so the difficulty of retrieving the video was not considered. The queries of DiDeMo are not as specific as those of TVR, e.g., "a girl is playing ball", making it hard to retrieve the correct video.

[Figure 5: plot of R@1, IoU=0.7 on VCMR against the number of retrieved videos (1 to 10) for MINUTE(HR), CONQUER, and CONQUER*.]
Figure 5. The VCMR performance of our model and CONQUER under different numbers of retrieved videos, where 'CONQUER*' denotes CONQUER with our retriever and scoring function.

5.5. Moment Prediction Bias

As shown in Fig. 5, when the number of retrieved videos increases, the performance of our model improves, but that of CONQUER changes little, which indicates that moment prediction bias limits its performance. This bias comes from the inconsistency of Shared-Norm between training and inference. Our prediction based on the scoring function in Eq. (13) addresses the bias by ranking moments across multiple retrieved videos during inference. When we replace CONQUER's retriever and scoring function with ours, CONQUER* in Fig. 5 also mitigates moment prediction bias, showing the effectiveness of the proposed approach.

Table 5. Performance on VCMR and SVMR (R@1, IoU=0.5, 0.7) when removing the two components of the localizer. MCM denotes multimodal clue mining, and MMT represents the multimodal Transformer.

                 VCMR               SVMR
Model            0.5    0.7         0.5    0.7
MINUTE(HR)       19.22  10.70       44.74  24.08
w/o MCM          18.21  10.17       43.41  23.46
w/o MMT          16.71  8.66        40.5   20.97

5.6. Multimodal Clue Mining

We study the effectiveness of the two components of the localizer through the ablations in Tab. 5. When MCM is removed, the accuracy drops, which shows that discovering key content from images and subtitles as clues is helpful for moment localization. When we only use MCM, the accuracy drops a lot, indicating that using clues alone is not enough and that fine-grained cross-modal interactions are also needed.

[Figure 6: two qualitative VCMR examples on TVR showing ground-truth, MINUTE, and CONQUER moment predictions (timestamps such as 24.00s-27.00s, 31.46s-43.40s, 30.00s-42.00s) for the queries "The bandleader announces Chandler and Monica and they walk into the room." and "Amy and Bernadette spin around on their bar seats to face the other way.", together with the relevant subtitles.]
Figure 6. Two cases on TVR from the proposed model and CONQUER.

5.7. Case Study

We show two VCMR cases in Fig. 6. In the first case, both models rank the correct video first, and the moment predicted by the proposed model is closer to the ground truth. The proposed model captures the key images related to "they walk into the room" to help localize the moment, indicating the effectiveness of MCM in our model. In the second case, both models rank a wrong video first because the scenario in that video is similar to the one in the correct video. CONQUER fails to predict the correct moment from the correct video because it places too much emphasis on the top-ranked videos. Our proposed model can predict the correct moment, which verifies that our prediction mitigates moment prediction bias.

6. Conclusion

In this paper, we propose MultI-video raNking with mUlTimodal cluE (MINUTE), a model that addresses two problems of the two-stage method for the video corpus moment retrieval task: moment prediction bias and latent key content. We first analyze the cause of moment prediction bias, namely the inconsistency of Shared-Norm between training and inference; we then adopt Shared-Norm in inference and rank moments across multiple videos with our derived scoring function to mitigate this bias. For latent key content, we propose a multimodal clue mining component that discovers important content from the two modalities of a video as clues for better moment localization. Extensive experiments on the TVR and DiDeMo datasets show that our proposed model mitigates the two problems and achieves a new state of the art for the video corpus moment retrieval task.
In ICCV, 2017. 7 +[2] Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. +Neural machine translation by jointly learning to align and +translate. In ICLR, 2015. 2 +[3] Jingyuan Chen, Xinpeng Chen, Lin Ma, Zequn Jie, and Tat- +Seng Chua. Temporally grounding natural sentence in video. +In EMNLP, 2018. 3 +[4] Long Chen, Chujie Lu, Siliang Tang, Jun Xiao, Dong Zhang, +Chilie Tan, and Xiaolin Li. Rethinking the bottom-up frame- +work for query-based video localization. In AAAI, 2020. 3 +[5] Shaoxiang Chen and Yu-Gang Jiang. Semantic proposal for +activity localization in videos via sentence query. In AAAI, +2019. 3 +[6] Shizhe Chen, Yida Zhao, Qin Jin, and Qi Wu. Fine-grained +video-text retrieval with hierarchical graph reasoning. +In +CVPR, 2020. 2 +[7] Christopher Clark and Matt Gardner. Simple and effective +multi-paragraph reading comprehension. In ACL, 2018. 1, 3 +[8] Jianfeng Dong, Xirong Li, Chaoxi Xu, Xun Yang, Gang +Yang, Xun Wang, and Meng Wang. Dual encoding for video +retrieval by text. TPAMI, 2021. 2 +[9] Victor Escorcia, +Mattia Soldan, +Josef Sivic, +Bernard +Ghanem, and Bryan C. Russell. +Temporal localization of +moments in video collections with natural language. CoRR, +2019. 1, 3 +[10] Fartash Faghri, David J Fleet, Jamie Ryan Kiros, and Sanja +Fidler. Vse++: Improving visual-semantic embeddings with +hard negatives. arXiv preprint arXiv:1707.05612, 2017. 3, 5 +[11] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and +Kaiming He. Slowfast networks for video recognition. In +ICCV, 2019. 5 +[12] Valentin Gabeur, Chen Sun, Karteek Alahari, and Cordelia +Schmid. +Multi-modal transformer for video retrieval. +In +ECCV. Springer, 2020. 2 +[13] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. +Tall: Temporal activity localization via language query. In +ICCV, 2017. 3 +[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. +Deep residual learning for image recognition. +In CVPR, +2016. 5 +[15] Zhijian Hou, Chong-Wah Ngo, and Wing Kwong Chan. +Conquer: Contextual query-aware ranking for video corpus +moment retrieval. In ACM MM, 2021. 1, 2, 3, 5, 7 +[16] Jie Lei, Licheng Yu, Tamara L Berg, and Mohit Bansal. Tvr: +A large-scale dataset for video-subtitle moment retrieval. In +ECCV, 2020. 1, 3, 5, 7 +[17] Kun Li, Dan Guo, and Meng Wang. +Proposal-free video +grounding with contextual pyramid network. In AAAI, 2021. +3 +[18] Linjie Li, Yen-Chun Chen, Yu Cheng, Zhe Gan, Licheng Yu, +and Jingjing Liu. Hero: Hierarchical encoder for video+ lan- +guage omni-representation pre-training. In EMNLP, 2020. +1, 3, 5, 7 +[19] Daizong Liu, Xiaoye Qu, Jianfeng Dong, Pan Zhou, Yu +Cheng, Wei Wei, Zichuan Xu, and Yulai Xie. Context-aware +biaffine localizing network for temporal sentence grounding. +In CVPR, 2021. 3 +[20] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar +Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettle- +moyer, and Veselin Stoyanov. Roberta: A robustly optimized +bert pretraining approach. arXiv preprint arXiv:1907.11692, +2019. 5 +[21] Mandela Patrick, Po-Yao Huang, Yuki Asano, Florian +Metze, Alexander G Hauptmann, Joao F Henriques, and An- +drea Vedaldi. Support-set bottlenecks for video-text repre- +sentation learning. In ICLR, 2020. 2 +[22] Anshumali Shrivastava and Ping Li. Asymmetric lsh (alsh) +for sublinear time maximum inner product search (mips). In +NeurIPS, 2014. 4 +[23] Aaron Van den Oord, Yazhe Li, and Oriol Vinyals. Repre- +sentation learning with contrastive predictive coding. arXiv +e-prints, pages arXiv–1807, 2018. 
6 +[24] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszko- +reit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia +Polosukhin. Attention is all you need. NeurIPS, 2017. 2 +[25] Xiaohan Wang, Linchao Zhu, and Yi Yang. T2vlad: global- +local sequence alignment for text-video retrieval. In CVPR, +2021. 2 +[26] Shaoning Xiao, Long Chen, Jian Shao, Yueting Zhuang, and +Jun Xiao. Natural language video localization with learnable +moment proposals. In EMNLP, 2021. 3 +[27] Xun Yang, Jianfeng Dong, Yixin Cao, Xun Wang, Meng +Wang, and Tat-Seng Chua. Tree-augmented cross-modal en- +coding for complex-query video retrieval. In SIGIR, pages +1339–1348, 2020. 2 +[28] Adams Wei Yu, David Dohan, Minh-Thang Luong, Rui +Zhao, Kai Chen, Mohammad Norouzi, and Quoc V Le. +Qanet: +Combining local convolution with global self- +attention for reading comprehension. In ICLR, 2018. 3 +[29] Yitian Yuan, Tao Mei, and Wenwu Zhu. To find where you +talk: Temporal sentence localization in video with attention +based location regression. In AAAI, 2019. 3 +[30] Runhao Zeng, Haoming Xu, Wenbing Huang, Peihao Chen, +Mingkui Tan, and Chuang Gan. Dense regression network +for video grounding. In CVPR, 2020. 3 +[31] Bowen Zhang, Hexiang Hu, Joonseok Lee, Ming Zhao, +Sheide Chammas, Vihan Jain, Eugene Ie, and Fei Sha. A +hierarchical multi-modal encoder for moment localization in +video corpus. arXiv preprint arXiv:2011.09046, 2020. 1, 3, +7 +[32] Hao Zhang, Aixin Sun, Wei Jing, Guoshun Nan, Liangli +Zhen, Joey Tianyi Zhou, and Rick Siow Mong Goh. Video +corpus moment retrieval with contrastive learning. In SIGIR, +2021. 1, 3, 5, 7 +[33] Hao Zhang, Aixin Sun, Wei Jing, and Joey Tianyi Zhou. +Span-based localizing network for natural language video lo- +calization. In ACL, 2020. 3 +[34] Mingxing Zhang, Yang Yang, Xinghan Chen, Yanli Ji, Xing +Xu, Jingjing Li, and Heng Tao Shen. Multi-stage aggregated +transformer network for temporal language localization in +videos. In CVPR, 2021. 
3 +9 + diff --git a/29FRT4oBgHgl3EQfnzcq/content/tmp_files/load_file.txt b/29FRT4oBgHgl3EQfnzcq/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..87c58d09b3e243d9c6fe10978d86caade2b5d916 --- /dev/null +++ b/29FRT4oBgHgl3EQfnzcq/content/tmp_files/load_file.txt @@ -0,0 +1,622 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf,len=621 +page_content='Multi-video Moment Ranking with Multimodal Clue Danyang Hou1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content='2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Liang Pang1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Yanyan Lan4,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Huawei Shen1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content='3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Xueqi Cheng2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content='3 1 Data Intelligence System Research Center,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Institute of Computing Technology,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' CAS,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Beijing,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' China 2 CAS Key Lab of Network Data Science and Technology,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Institute of Computing Technology,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' CAS,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Beijing,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' China 3 University of Chinese Academy of Sciences,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Beijing,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' China 4 Institute for AI Industry Research,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Tsinghua University,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Beijing,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' China Abstract Video corpus moment retrieval (VCMR) is the task of re- trieving a relevant video moment from a large corpus of untrimmed videos via a natural language query.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' State-of- the-art work for VCMR is based on two-stage method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' In this paper, we focus on improving two problems of two-stage method: (1) Moment prediction bias: The predicted mo- ments for most queries come from the top retrieved videos, ignoring the possibility that the target moment is in the bottom retrieved videos, which is caused by the incon- sistency of Shared Normalization during training and in- ference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' (2) Latent key content: Different modalities of video have different key information for moment localiza- tion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' To this end, we propose a two-stage model MultI-video raNking with mUlTimodal cluE (MINUTE).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' MINUTE uses Shared Normalization during both training and inference to rank candidate moments from multiple videos to solve moment predict bias, making it more efficient to predict tar- get moment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' In addition, Mutilmdaol Clue Mining (MCM) of MINUTE can discover key content of different modali- ties in video to localize moment more accurately.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' MINUTE outperforms the baselines on TVR and DiDeMo datasets, achieving a new state-of-the-art of VCMR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Our code will be available at GitHub.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Introduction The rise of video-sharing applications has led to a dra- matic increase in the number of videos on the Internet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Faced with such a huge video corpus, users need an accu- rate retrieval tool to meet the needs of fine-grained cross- modal information.' 
We have the opportunity to address this challenge thanks to the recently proposed video corpus moment retrieval (VCMR) [9, 16] task, which requires retrieving a video moment via a natural language query from a collection of untrimmed videos, where the moment is a temporal segment of a video.

[Figure 1. Moment prediction bias: video retrieval accuracy improves as the number of retrieved videos increases, indicating that the probability of predicting the correct moment also increases. However, when the number of retrieved videos exceeds 2, moment prediction accuracy hardly increases, which means that the predicted moments for most queries come from the top 2 videos.]

VCMR consists of two sub-tasks: video retrieval (VR) and single video moment retrieval (SVMR). The goal of VR is to retrieve videos that may contain the target moment via a natural language query, and SVMR aims to use the query to localize the target moment in the retrieved videos. According to the strategies used to learn the two sub-tasks, existing methods can be divided into the one-stage method and the two-stage method. The one-stage method [16, 18, 31, 32] treats VCMR as a multi-task learning problem, using a shared backbone with two different heads to learn VR and SVMR, whereas the two-stage method [15] leverages a pipeline of two independent modules to learn the two sub-tasks. Specifically, it first trains a video retriever on query-video pairs to learn VR, then uses the Shared Normalization (Shared-Norm) [7] technique to train a localizer to learn SVMR, where the negatives for Shared-Norm are sampled from the training data by the trained retriever. In inference, it first uses the retriever to select the most relevant K videos from the corpus, then uses the localizer to localize the candidate moments in the K videos. The final predicted moment depends on both the retrieval score and the localization score.

[Figure 2. Latent key content: the images with a red border are visual key content because they are relevant to "House shows a picture of the patient to his team" in the query. The highlighted subtitle is textual key content, for it relates to "they have concluded that maybe the two are not related by blood".]

The two-stage method is more suitable for VCMR because (1) Shared-Norm can enhance the possibility of the target moment appearing in the correct video, and (2) the two-stage method can select models with different query-video interaction modes for the two modules. For example, it can use a late-fusion model as the retriever for fast video retrieval and an early-fusion model as the localizer for accurate moment localization. The state-of-the-art model [15] for VCMR is also based on the two-stage method. However, two problems limit the performance of the two-stage method.

The first is moment prediction bias: as shown in Fig. 1, the final predicted moments for most queries come from the top-ranked videos among the K retrieved videos. This is counter-intuitive, because the more videos are retrieved, the more likely it is that those videos contain the correct moment. This bias neglects the possibility that the target moment is in the bottom-ranked videos. The reason for this bias is that, although the two-stage method uses Shared-Norm to normalize the probability of the correct moment across the correct video and negative videos during training, it still only normalizes the probabilities of the candidate moments within a single video during inference. This inconsistency between training and inference makes the localization scores of candidate moments from different videos incomparable during inference. Since the final predicted moment depends on both the video retrieval score and the moment localization score, the incomparable localization scores make the final prediction depend mainly on the video retrieval scores, so the final predicted moment tends to come from the videos with higher rankings. The second problem is latent key content: the localizer of the two-stage method neglects the key content of different modalities for moment localization. A video is usually composed of multimodal information, such as images (vision) and subtitles (text). As shown in Fig. 2, visual information and textual information have different emphases; if we can find the important visual and textual information and use it as clues, it will help moment localization. In this paper, we propose MultI-video raNking with mUlTimodal cluE (MINUTE) to address these two problems of the two-stage method.
For the first problem, we keep Shared-Norm consistent between training and inference, which forces the localization scores of candidate moments from the multiple videos retrieved by the retriever to be comparable during inference. On this basis, we derive a new scoring function to rank the candidate moments, which combines the video retrieval and moment localization scores more effectively. For the second problem, we propose an early-fusion localizer with a Multimodal Clue Mining (MCM) component that discovers key content from different modalities to help moment localization. Specifically, MCM first uses the query to measure the importance of all images and subtitles in the video, then assigns weights to these elements according to their importance. The elements with high importance can be seen as key clues that improve moment localization. We then feed the weighted video representation together with the query representation to a multimodal Transformer that captures deeper interactions between video and query to predict moments. We conduct extensive experiments on the TVR and DiDeMo datasets. The experimental results show that the proposed MINUTE outperforms the other baselines, achieving a new state-of-the-art result. Ablation experiments verify that our method mitigates the two problems of the two-stage method.

2. Related Work

We first briefly introduce works related to the two sub-tasks of VCMR. After that, we introduce recent works for VCMR in detail.

Text-video retrieval is a cross-modal retrieval task whose goal is to retrieve relevant videos from a corpus through a natural language query. This task is similar to the VR sub-task of VCMR, but most of the content of a video in the former is relevant to the query, while only a small part of the video content in the latter is relevant to the query. Works on text-video retrieval can be divided into two categories depending on the interaction mode between query and video, i.e., late fusion and early fusion. Late-fusion methods [8, 21, 27] use two separate encoders to embed queries and videos into a shared semantic space. These models can be very efficient if we compute and index each modality representation offline, since only the similarity between video and query has to be computed at inference time. Early-fusion methods [6, 12, 25] make fine-grained interactions between video and query with an attention mechanism [2, 24] to improve retrieval accuracy.

Temporal language grounding is a task similar to SVMR, which requires localizing a moment in a video given a natural language query. Temporal language grounding can be seen as a special case of VCMR with only one video in the corpus for each query. According to the way the moment is predicted, existing works on temporal language grounding can be divided into proposal-based and proposal-free methods. The proposal-based method [3, 5, 13, 19, 26, 34] first generates several proposals as candidates and then ranks the proposals according to their matching degree with the query, and the proposal with the highest matching degree is regarded as the answer. Unlike the proposal-based method, the proposal-free method [4, 17, 29, 30, 33] directly predicts the start and end times of the moment without pre-extracting proposals as candidates.

Video corpus moment retrieval was first proposed by [9]; [16] then proposed a new dataset, TVR, for VCMR, which extends the uni-modal videos (images only) of the previous dataset to multiple modalities (images and subtitles). Existing works for VCMR can be divided into two categories depending on how they learn the two sub-tasks, i.e., the one-stage method [16, 18, 31, 32] and the two-stage method [15]. The one-stage method treats VCMR as a multi-task learning problem, using a shared model with two different heads to learn VR and SVMR simultaneously. XML [16] is the first one-stage method for VCMR; it uses a late-fusion model to encode video and query separately and then uses two different heads to learn the two tasks. ReLoCLNet [32] leverages contrastive learning to enhance the performance of XML. [18] also follows XML and proposes a video-language pre-training model, HERO, which significantly improves the performance. HAMMER [31] is an early-fusion one-stage model that uses attention to make deep interactions between query and video for more accurate moment retrieval. The two-stage method leverages two different modules to learn the two sub-tasks. CONQUER [15] is the only two-stage method; it uses the video retrieval head of HERO [18] as the retriever and proposes a model based on context-query attention (CQA) [28] as the localizer. CONQUER achieves state-of-the-art results on VCMR. In training, CONQUER uses the Shared-Norm [7] technique to train the localizer. In inference, CONQUER first uses the video retriever to retrieve the top-K videos, then uses the moment localizer to localize the moment in the retrieved videos.
The two-stage method is more suitable for VCMR, but it suffers from moment prediction bias and latent key content. In this paper, we focus on these two problems.

3. Background

We first formulate VCMR, then describe the two-stage method, followed by an analysis of moment prediction bias.

3.1. Task Formulation

We denote a corpus of videos as $\mathcal{V} = \{v_1, v_2, \dots, v_{|\mathcal{V}|}\}$, where $|\mathcal{V}|$ is the number of videos in the corpus and $v_i = \{f_i^1, f_i^2, \dots, f_i^{|v_i|}\}$ is the $i$-th video, which contains $|v_i|$ frames. Each frame $f_i^j$ consists of an image and a subtitle $(I_i^j, s_i^j)$. Note that if a frame has no subtitle, $s_i^j$ is set to empty. Given a natural language query $q = \{w_1, w_2, \dots, w_{|q|}\}$, which consists of a sequence of words, the goal of VCMR is to retrieve the most relevant moment $m^*$ from $\mathcal{V}$. The target moment $m^*$ is a temporal segment $(\tau_{*,st}, \tau_{*,ed})$ in video $v^*$, where $v^*$ denotes the video that contains the target moment, whose start and end timestamps are $\tau_{*,st}$ and $\tau_{*,ed}$, respectively.

The goal of VCMR can be seen as maximizing the probability of the target moment $m^*$ given the query $q$ and the video corpus $\mathcal{V}$:

$$m^* = \arg\max_m P(m \mid q, \mathcal{V}). \tag{1}$$

According to the chain rule of conditional probability,

$$P(m^* \mid q, \mathcal{V}) = P(m^* \mid v^*, q) \cdot P(v^* \mid q, \mathcal{V}), \tag{2}$$

where $P(v^* \mid q, \mathcal{V})$ and $P(m^* \mid v^*, q)$ are the probabilities of retrieving video $v^*$ from the corpus $\mathcal{V}$ and of localizing the target moment $m^*$ in the retrieved video, respectively. The probability of the target moment depends on the probabilities of its start and end timestamps:

$$P(m^* \mid v^*, q) = P_{st}(\tau_{*,st} \mid v^*, q) \cdot P_{ed}(\tau_{*,ed} \mid v^*, q). \tag{3}$$
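To make the formulation concrete, the following is a minimal sketch (illustrative only, not from the paper) of the objects involved and of the decomposition in Eq. (2)-(3), written in log space as it would typically be implemented; all class and field names are assumptions.

```python
from dataclasses import dataclass
from typing import List, Optional
import torch

@dataclass
class Frame:
    image_feat: torch.Tensor        # visual feature of I_i^j
    subtitle: Optional[str]         # s_i^j; None when the frame has no subtitle

@dataclass
class Video:
    video_id: str
    frames: List[Frame]             # |v_i| frames

@dataclass
class Moment:
    video_id: str                   # the video v that contains the moment
    start: int                      # start frame index (tau_st)
    end: int                        # end frame index (tau_ed)

def log_moment_probability(log_p_video: float, log_p_start: float, log_p_end: float) -> float:
    """Eq. (2)-(3) in log space:
    log P(m | q, V) = log P(v | q, V) + log P_st(tau_st | v, q) + log P_ed(tau_ed | v, q)."""
    return log_p_video + log_p_start + log_p_end
```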
3.2. Two-stage Method

The two-stage method uses a video retriever to model $P(v^* \mid q, \mathcal{V})$ and a moment localizer to model $P(m^* \mid v^*, q)$. In training, the two-stage method uses a margin-based loss [10] to train the video retriever and then uses Shared-Norm to train the moment localizer. Specifically, for a query there is a positive video $v^+$ whose moment $(\tau_{+,j}, \tau_{+,k})$ is the ground truth, and $n$ negative videos $\{v^-_1, v^-_2, \dots, v^-_n\}$ that do not contain the target moment. Shared-Norm is leveraged to normalize the probabilities of $\tau_{+,j}$ being the start time and $\tau_{+,k}$ being the end time across all frames in the positive video and the negatives, e.g.,

$$P_{st}(\tau_{+,j} \mid v^+, q) = \frac{\exp(l^{st}_{+,j})}{\sum_{a=1}^{n+1} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})}, \tag{4}$$

where $l^{st}_{a,b}$ is the logit that the $b$-th frame in video $v_a$ is the start timestamp of the ground-truth moment, and $|v_a|$ is the number of frames in that video. Training with Shared-Norm enhances the possibility of the target moment existing in the correct video.
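As a rough illustration of Eq. (4), the sketch below applies a single softmax over the concatenated start logits of the positive video and the n negative videos; it is a simplified rendering of Shared-Norm under assumed tensor layouts, not the released implementation.

```python
import torch
import torch.nn.functional as F

def shared_norm_start_loss(pos_logits: torch.Tensor,   # (|v+|,) start logits of the positive video
                           neg_logits: list,           # list of (|v-_a|,) start logits, one per negative video
                           gt_start_idx: int) -> torch.Tensor:
    """Eq. (4): softmax over ALL frames of the positive and negative videos,
    then negative log-likelihood of the ground-truth start frame."""
    all_logits = torch.cat([pos_logits] + list(neg_logits))   # (total number of frames,)
    log_probs = F.log_softmax(all_logits, dim=0)
    # the ground-truth frame lives in the positive video, which is placed first
    # (the end-time probability in Eq. (4) is handled in the same way with end logits)
    return -log_probs[gt_start_idx]
```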
In inference, the retriever first uses the query to retrieve the top-K videos from the corpus, and the localizer then localizes the target moment in the retrieved videos. The score of the final predicted moment $(\tau_{i,j}, \tau_{i,k})$ in video $i$, with start time $j$ and end time $k$, depends on both the retrieval score and the localization score; the scoring function is

$$S_{i,jk} = \exp(\alpha \cdot S^R_i) \cdot S^L_{i,jk}, \tag{5}$$

where $S_{i,jk}$ is the final score of the predicted moment, $S^R_i$ is the retrieval score of video $v_i$, $S^L_{i,jk}$ is the localization score of a moment in a video, and $\alpha$ is a hyper-parameter that encourages the target moment to come from the top retrieved videos. The retrieval score is computed as the cosine similarity between the query representation and the video representation, and the localization score is computed as the probability of a moment within a single video:

$$S^L_{i,jk} = P_{st}(\tau_{i,j} \mid v_i, q) \cdot P_{ed}(\tau_{i,k} \mid v_i, q), \tag{6}$$

where $P_{st}(\tau_{i,j} \mid v_i, q)$ and $P_{ed}(\tau_{i,k} \mid v_i, q)$ are normalized within a single video:

$$P_{st}(\tau_{i,j} \mid v_i, q) = \frac{\exp(l^{st}_{i,j})}{\sum_{b=1}^{|v_i|} \exp(l^{st}_{i,b})}. \tag{7}$$
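For contrast with the training objective above, here is a minimal sketch of the baseline two-stage inference scoring in Eq. (5)-(7), where each video's start and end logits are softmax-normalized only within that video before being combined with the retrieval score; variable names and the value of alpha are assumptions, not the paper's code.

```python
import math
import torch
import torch.nn.functional as F

def baseline_moment_scores(retrieval_score: float,        # S^R_i for one retrieved video
                           start_logits: torch.Tensor,    # (|v_i|,)
                           end_logits: torch.Tensor,      # (|v_i|,)
                           alpha: float = 20.0) -> torch.Tensor:
    """Eq. (5)-(7): per-video normalization, then exp(alpha * S^R_i) * S^L_{i,jk}."""
    p_st = F.softmax(start_logits, dim=0)                  # Eq. (7), normalized within a single video
    p_ed = F.softmax(end_logits, dim=0)
    loc = p_st.unsqueeze(1) * p_ed.unsqueeze(0)            # S^L_{i,jk} for all (j, k) pairs, Eq. (6)
    loc = torch.triu(loc)                                  # keep only candidates with j <= k
    return math.exp(alpha * retrieval_score) * loc         # Eq. (5)
```

Because the softmax in Eq. (7) is taken per video, these moment scores are not comparable across the K retrieved videos; this is exactly the inconsistency analyzed next in Sec. 3.3.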
3.3. Moment Prediction Bias

As shown in Fig. 1, the final predicted moments of the two-stage method for most queries come from the top-ranked videos. This bias limits the performance of the two-stage method on VCMR, because it neglects the possibility that the target moment is in the bottom-ranked videos. We conjecture that this bias mainly comes from the inconsistency of normalization between training and inference, shown in Eq. (4) and Eq. (7). In training, Shared-Norm highlights the significance of the correct moment being in the correct video. Nevertheless, in inference, the probability is computed within every single video, so the candidate moments predicted from different videos are incomparable and this significance no longer exists. Therefore, the score of the final predicted moment in Eq. (5) depends mostly on the video retrieval score, making the final predicted moment more likely to come from the top-ranked videos.

4. Method

We first illustrate how we mitigate moment prediction bias. Then we introduce the proposed model MINUTE, with emphasis on the multimodal clue mining component. Finally, we describe the training of MINUTE.

4.1. Multi-video Moment Ranking in Prediction

We propose to adopt Shared-Norm in inference as well, so that the localization scores of candidate moments from multiple videos are comparable, which enhances the influence of the moment localization score $S^L_{i,jk}$ on the final score $S_{i,jk}$ and mitigates moment prediction bias. Furthermore, we derive a new scoring function from Eq. (2) that combines the video retrieval and moment localization scores more effectively.

Specifically, to compute $P(v^* \mid q, \mathcal{V})$, we obtain the video representation $\mathbf{v}_i = \{\mathbf{f}_i^1, \mathbf{f}_i^2, \dots, \mathbf{f}_i^{|v_i|}\}$ and the query representation $\mathbf{q}$. In the following, we use bold notation to denote vectors. The $j$-th frame representation $\mathbf{f}_i^j$ consists of an image representation and a subtitle representation $(\mathbf{I}_i^j, \mathbf{s}_i^j)$. The query also has two representations $(\mathbf{q}^I, \mathbf{q}^s)$, used to compute similarity scores against images and subtitles, respectively. The details of the query and video representations are given in Sec. 4.2.1. Because only part of the content of a video is related to the query, the similarity score $S^R_i$ between the query and the video is the average of the max-pooled query-image scores and the max-pooled query-subtitle scores. We use the inner product as the similarity function $\mathrm{sim}(\cdot)$:

$$\mathrm{sim}(\mathbf{q}^c, \mathbf{c}_i^j) = {\mathbf{q}^c}^{\top} \mathbf{c}_i^j, \; c \in \{I, s\}, \qquad \phi_c = \max_{1 \le j \le |v_i|} \mathrm{sim}(\mathbf{q}^c, \mathbf{c}_i^j), \qquad S^R_i = \frac{\phi_I + \phi_s}{2}, \tag{8}$$

where $\mathbf{c}_i^j$ stands for $\mathbf{I}_i^j$ or $\mathbf{s}_i^j$. The probability $P(v^* \mid q, \mathcal{V})$ is computed by softmax-normalizing this score across all query-video scores in the corpus:

$$P(v^* \mid q, \mathcal{V}) = \frac{\exp(S^R_*)}{\sum_{j=1}^{|\mathcal{V}|} \exp(S^R_j)}. \tag{9}$$

Computing the inner product between the query and all videos in the corpus is computationally intensive, so we employ Maximum Inner Product Search (MIPS) [22] to find the top-K videos and approximate the probability. The calculation of $P(v^* \mid q, \mathcal{V})$ in Eq. (9) can be approximated by $P(v^* \mid q, \mathcal{V}^*)$:

$$P(v^* \mid q, \mathcal{V}) \approx P(v^* \mid q, \mathcal{V}^*) = \frac{\exp(S^R_*)}{\sum_{j=1}^{K} \exp(S^R_j)}. \tag{10}$$

The probabilities of the remaining videos in the corpus are considered close to 0. The retriever is trained to maximize the log-likelihood $\log P(v^* \mid q, \mathcal{V})$, which differs from the previous two-stage method that uses a margin-based loss.
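The sketch below shows one way the score in Eq. (8) and the top-K shortlist of Eq. (10) could be computed with plain tensor operations over a small in-memory corpus; in practice the per-modality frame embeddings would be pre-indexed and searched with a MIPS/ANN library, which is omitted here. Shapes and names are assumptions.

```python
import torch

def video_retrieval_score(q_img: torch.Tensor,       # (d,)  query representation q^I
                          q_sub: torch.Tensor,       # (d,)  query representation q^s
                          img_feats: torch.Tensor,   # (num_frames, d) image representations of one video
                          sub_feats: torch.Tensor    # (num_frames, d) subtitle representations
                          ) -> torch.Tensor:
    """Eq. (8): max-pool the query-image and query-subtitle inner products, then average."""
    phi_img = (img_feats @ q_img).max()
    phi_sub = (sub_feats @ q_sub).max()
    return 0.5 * (phi_img + phi_sub)

def retrieve_top_k(q_img, q_sub, corpus, k=10):
    """Score every video in a small corpus and keep the top-K shortlist used in Eq. (10)."""
    scores = torch.stack([
        video_retrieval_score(q_img, q_sub, v["img"], v["sub"]) for v in corpus
    ])
    topk = torch.topk(scores, k=min(k, len(corpus)))
    return topk.indices.tolist(), topk.values            # retrieved video indices and their S^R scores
```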
As for $P(m^* \mid v^*, q)$, we use Shared-Norm in inference as well, consistently with training, to mitigate moment prediction bias:

$$P(m^* \mid v^*, q) \approx P(m^* \mid \mathcal{V}^*, q) = \frac{\exp(l^{st}_{*,j})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})} \cdot \frac{\exp(l^{ed}_{*,k})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{ed}_{a,b})}. \tag{11}$$

A well-trained localizer should suppress the probability that the target moment appears in the wrong videos to close to zero, so $P(m^* \mid \mathcal{V}^*, q)$ approximately equals $P(m^* \mid v^*, q)$. The details of the logits $l^{st}_{*,j}$ are introduced in Sec. 4.2.2. Combining Eq. (2), Eq. (10) and Eq. (11), the probability $P(m^* \mid q, \mathcal{V})$ can be computed as

$$P(m^* \mid q, \mathcal{V}) \approx \frac{\exp(S^R_*)}{\sum_{j=1}^{K} \exp(S^R_j)} \cdot \frac{\exp(l^{st}_{*,j})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})} \cdot \frac{\exp(l^{ed}_{*,k})}{\sum_{a=1}^{K} \sum_{b=1}^{|v_a|} \exp(l^{ed}_{a,b})}, \tag{12}$$

where the denominators are the same for all candidate moments from the K videos, so we can simplify this probability to a new scoring function:

$$S_* = S^R_* + l^{st}_{*,j} + l^{ed}_{*,k}, \tag{13}$$

where $l^{st}_{*,j} + l^{ed}_{*,k} = S^L_{*,jk}$ represents the moment localization score. This scoring function is simpler than Eq. (5) and does not need the hyper-parameter $\alpha$, which can otherwise greatly increase the weight of the retrieval score of the top-ranked videos. In inference, we use the scoring function in Eq. (13) to rank all candidate moments from the multiple retrieved videos.
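A minimal sketch of how the ranking in Eq. (13) could be carried out over the K retrieved videos is given below; since the shared softmax denominators cancel, candidates can be ranked directly by the sum of the retrieval score and the raw start and end logits. Names, shapes, and the moment-length cap are assumptions rather than the released implementation.

```python
import torch

def rank_moments_multi_video(retrieval_scores,   # list of K floats, S^R_i
                             start_logits,       # list of K tensors, each (|v_i|,)
                             end_logits,         # list of K tensors, each (|v_i|,)
                             max_len: int = 24): # assumed cap on moment length, in frames
    """Eq. (13): score every candidate (video i, start j, end k) as S^R_i + l^st_{i,j} + l^ed_{i,k}
    and rank all candidates from all K videos in a single list."""
    candidates = []
    for i, (sr, lst, led) in enumerate(zip(retrieval_scores, start_logits, end_logits)):
        n = lst.shape[0]
        for j in range(n):
            for k in range(j, min(j + max_len, n)):
                score = sr + lst[j].item() + led[k].item()
                candidates.append((score, i, j, k))
    candidates.sort(key=lambda c: c[0], reverse=True)
    return candidates    # best moment first, possibly coming from a lower-ranked video
```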
4.2. Model

We propose the two-stage MINUTE model, consisting of a late-fusion video retriever and an early-fusion moment localizer.

4.2.1 Video Retriever

The goal of the video retriever is to select a small subset $\mathcal{V}^*$ from the corpus $\mathcal{V}$ given the query $q$, where the videos in the subset may contain the target moment.
The retriever of the proposed model is a late-fusion model that contains two encoders, a query encoder and a video encoder, as shown in Fig. 3. The late-fusion architecture ensures retrieval efficiency if we index the representations of the videos in advance.

Video Encoder The video encoder encodes the frames of the $i$-th video into frame representations $\mathbf{v}_i = \{\mathbf{f}_i^1, \dots, \mathbf{f}_i^{|v_i|}\}$, where the $j$-th frame $\mathbf{f}_i^j$ contains an image representation $\mathbf{I}_i^j$ and a subtitle representation $\mathbf{s}_i^j$. We first use RoBERTa [20] to extract sentence features of the subtitles, and SlowFast [11] and ResNet [14] to extract image features. We then feed the subtitle features and image features to a one-layer multimodal Transformer that simultaneously captures intra-modal and inter-modal dependencies and outputs each image representation $\mathbf{I}_i^j$ and subtitle representation $\mathbf{s}_i^j$.

Query Encoder The query encoder converts the query $q = \{w_1, w_2, \dots, w_{|q|}\}$ into the query representation $\mathbf{q}$. We first use RoBERTa to extract the feature $\mathbf{w}_j$ of each word in the query, and a one-layer Transformer is used to capture the contextual representation of each word. We generate two query representations, one for the query-image similarity score and one for the query-subtitle similarity score, denoted $\mathbf{q}^I$ and $\mathbf{q}^s$. We adopt a modular pooling mechanism [16] to convert the sequence of word representations into the two vectors:

$$o_i = \mathbf{W}_c \mathbf{w}_i, \qquad \alpha_i = \frac{\exp(o_i)}{\sum_{j=1}^{|q|} \exp(o_j)}, \qquad \mathbf{q}^c = \sum_{i=1}^{|q|} \alpha_i \mathbf{w}_i, \tag{14}$$

where $\mathbf{W}_c$ are learnable parameters and $c \in \{I, s\}$. The modular mechanism can be regarded as a learnable pooling and is also used in previous works [16, 18, 32].
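A small sketch of the modular pooling in Eq. (14) is given below, assuming word representations of shape (sequence length, d); it is a paraphrase of the mechanism, not the released code.

```python
import torch
import torch.nn as nn

class ModularPooling(nn.Module):
    """Eq. (14): a learnable attention pooling that turns word vectors into one query vector
    per modality (c in {I, s})."""
    def __init__(self, dim: int):
        super().__init__()
        self.w = nn.Linear(dim, 1, bias=False)     # W_c

    def forward(self, words: torch.Tensor) -> torch.Tensor:        # words: (|q|, d)
        o = self.w(words).squeeze(-1)              # (|q|,) attention logits o_i
        alpha = torch.softmax(o, dim=0)            # attention weights alpha_i
        return (alpha.unsqueeze(-1) * words).sum(dim=0)             # weighted sum, q^c of shape (d,)
```

Two separate instances of this module, applied to the same word representations, produce $\mathbf{q}^I$ and $\mathbf{q}^s$.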
[Figure 3. The video retriever consists of two encoders, a video encoder and a query encoder. 'ME' and 'PE' denote the modality embedding and the positional embedding, respectively.]

We also use the retrieval head of HERO [18] as a retriever for a fair comparison with CONQUER [15]. The original HERO uses a margin-based loss [10] to train video retrieval, so its retrieval score only represents the cosine similarity between the query and the videos; we therefore re-train HERO in the same way as the proposed retriever, so that it models the probability $P(v^* \mid q, \mathcal{V})$ in Eq. (10). We use "simple retriever" to denote the proposed retriever and "HERO retriever" to denote the retriever based on HERO.

4.2.2 Moment Localizer

The moment localizer, shown in Fig. 4, uses the query to localize the target moment $m^*$ in the top-K retrieved videos $\mathcal{V}^*$. The proposed localizer is based on an early-fusion architecture to explore deeper interactions between query and video. Because the retrieved videos are narrowed down to a small range, the amount of computation is acceptable.

The localizer first uses its query encoder to obtain the token representations $\{\bar{\mathbf{w}}_1, \dots, \bar{\mathbf{w}}_{|q|}\}$ and its video encoder to obtain the video representation $\bar{\mathbf{v}}_i = \{\bar{\mathbf{f}}_i^1, \dots, \bar{\mathbf{f}}_i^{|v_i|}\}$, where $\bar{\mathbf{f}}_i^j$ contains an image representation and a subtitle representation $(\bar{\mathbf{I}}_i^j, \bar{\mathbf{s}}_i^j)$. The video encoder and query encoder of the localizer are the same as those of the retriever but do not share parameters. Our proposed localizer consists of two components: multimodal clue mining and a multimodal Transformer.

[Figure 4. The moment localizer contains two components, multimodal clue mining and a multimodal Transformer. For brevity, the subscripts of the representations are omitted.]

Multimodal Clue Mining (MCM) addresses the latent key content problem by discovering important content in the multiple modalities of a video to help moment localization. MCM first uses the query to measure the importance of each image and subtitle in the video, then assigns weights to these elements of the different modalities according to their importance. Specifically, we leverage modular pooling to obtain query representations $\bar{\mathbf{q}}^I$ and $\bar{\mathbf{q}}^s$, used to measure image importance and subtitle importance, respectively.
The importance is computed by

$$\mathbf{p}^j_c = (\bar{\mathbf{W}}_c \bar{\mathbf{c}}^j) \odot \bar{\mathbf{q}}^c, \; c \in \{I, s\}, \tag{15}$$

where $\bar{\mathbf{W}}_c$ are learnable parameters and $\mathbf{p}^j_c$ is the importance of the $j$-th image or subtitle. We then use the importance to weight the image and subtitle representations:

$$\hat{\mathbf{c}}^j = \mathrm{norm}(\mathbf{p}^j_c) \odot \bar{\mathbf{c}}^j, \; c \in \{I, s\}, \tag{16}$$

where $\hat{\mathbf{c}}^j$ is the weighted image or subtitle representation and $\mathrm{norm}$ is an L2-normalization, which makes the model converge better. MCM can be seen as an amplifier that allows the localizer to focus on important content, which we call clues, from the multiple modalities. We fuse the weighted representations $\hat{\mathbf{I}}^j$ and $\hat{\mathbf{s}}^j$ of a frame with a fully-connected layer:

$$\hat{\mathbf{f}}^j = \mathrm{FC}([\hat{\mathbf{I}}^j; \hat{\mathbf{s}}^j]), \tag{17}$$

where $[\,;\,]$ denotes concatenation and $\hat{\mathbf{f}}^j$ is the fused representation of the $j$-th frame. The fused video representation $\hat{\mathbf{v}}_i = \{\hat{\mathbf{f}}_i^1, \dots, \hat{\mathbf{f}}_i^{|v_i|}\}$ is fed to a multimodal Transformer together with the query token representations.

Multimodal Transformer (MMT) We use a three-layer multimodal Transformer to make deep interactions between the fused video representation and the token representations. In addition, two 1D-convolution layers are leveraged to capture dependencies between adjacent frames and to output the logits $l^{st}_{i,j}$ and $l^{ed}_{i,k}$ of the start and end times of the target moment.
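A rough sketch of the MCM weighting in Eq. (15)-(17), which produces the fused frame representations fed to the multimodal Transformer, is given below; the dimensions, the elementwise use of the importance vector, and the module structure reflect one reading of the equations, not the official implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class MultimodalClueMining(nn.Module):
    """Eq. (15)-(17): query-conditioned importance weighting of image and subtitle
    representations, followed by a fully-connected fusion of the two modalities."""
    def __init__(self, dim: int):
        super().__init__()
        self.w_img = nn.Linear(dim, dim, bias=False)   # \bar{W}_I
        self.w_sub = nn.Linear(dim, dim, bias=False)   # \bar{W}_s
        self.fc = nn.Linear(2 * dim, dim)              # FC in Eq. (17)

    def weight(self, proj: nn.Linear, feats: torch.Tensor, q: torch.Tensor) -> torch.Tensor:
        # feats: (num_frames, d), q: (d,)
        p = proj(feats) * q                            # Eq. (15), per-frame importance
        return F.normalize(p, p=2, dim=-1) * feats     # Eq. (16), L2-normalized weighting

    def forward(self, img_feats, sub_feats, q_img, q_sub):
        img_hat = self.weight(self.w_img, img_feats, q_img)
        sub_hat = self.weight(self.w_sub, sub_feats, q_sub)
        return self.fc(torch.cat([img_hat, sub_hat], dim=-1))   # fused frame representations, Eq. (17)
```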
4.3. Training

We first train the retriever on text-video pairs, then use the trained retriever to sample negative videos as hard negatives to train the localizer.

Training retriever To maximize the log-likelihood $\log P(v^* \mid q, \mathcal{V})$ in Eq. (9), we adopt the InfoNCE [23] loss with in-batch negative sampling to train the retriever. Specifically, let $d = \{(v_1, q_1), \dots, (v_b, q_b)\}$ denote the training data in a batch, where $b$ is the batch size. Each pair $(v_i, q_i)$ in $d$ has $b-1$ negative samples for the query-to-video loss and the video-to-query loss, namely $(v_z, q_i)_{z \neq i}$ and $(v_i, q_z)_{z \neq i}$:

$$\mathcal{L}_v = -\log \frac{\exp(S^R_{i,i})}{\sum_{z=1}^{b} \exp(S^R_{z,i})}, \qquad \mathcal{L}_q = -\log \frac{\exp(S^R_{i,i})}{\sum_{z=1}^{b} \exp(S^R_{i,z})}, \tag{18}$$

where $\mathcal{L}_v$ and $\mathcal{L}_q$ are the query-to-video loss and the video-to-query loss, respectively. We use the sum of the two losses to train the retriever.

Training localizer We use the well-trained retriever to retrieve top-ranked videos from the training data and sample $n$ videos as hard negatives to train the localizer with the Shared-Norm technique:

$$\mathcal{L}_{st} = -\log \frac{\exp(l^{st}_{+,j})}{\sum_{a=1}^{n+1} \sum_{b=1}^{|v_a|} \exp(l^{st}_{a,b})}, \qquad \mathcal{L}_{ed} = -\log \frac{\exp(l^{ed}_{+,k})}{\sum_{a=1}^{n+1} \sum_{b=1}^{|v_a|} \exp(l^{ed}_{a,b})}. \tag{19}$$

The sum of $\mathcal{L}_{st}$ and $\mathcal{L}_{ed}$ is used to train the localizer.
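As an illustration, the in-batch InfoNCE objective of Eq. (18) can be written as two cross-entropies over a b x b score matrix; the sketch below assumes the batch scores S^R_{i,z} are already computed and is not taken from the released code. The localizer losses in Eq. (19) have the same form as the Shared-Norm sketch shown after Eq. (4).

```python
import torch
import torch.nn.functional as F

def retriever_infonce_loss(score_matrix: torch.Tensor) -> torch.Tensor:
    """Eq. (18): score_matrix[i, z] = S^R_{i,z}, the similarity between video i and query z.
    Diagonal entries are the positive pairs; off-diagonal entries are in-batch negatives."""
    b = score_matrix.size(0)
    targets = torch.arange(b, device=score_matrix.device)
    loss_v = F.cross_entropy(score_matrix.t(), targets)   # query-to-video: normalize over videos
    loss_q = F.cross_entropy(score_matrix, targets)       # video-to-query: normalize over queries
    return loss_v + loss_q
```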
5. Experiment

We first introduce the datasets and metrics. Then we describe the implementation details. After that, we present the experimental comparison with baselines. Then we illustrate ablation studies of the proposed model. Finally, we present a case study.

Table 1. Comparisons of VCMR results (IoU=0.7) with baselines on the TVR validation and testing sets. 'SR' denotes the simple retriever, and 'HR' denotes the HERO retriever.

Model        Validation R1 / R10 / R100    Testing R1 / R10 / R100
XML          2.62 / 9.05 / 22.47           3.32 / 13.41 / 30.52
ReLoCLNet    4.15 / 14.06 / 32.42          - / - / -
HAMMER       5.13 / 11.38 / 16.71          - / - / -
HERO         5.13 / 16.26 / 24.55          6.21 / 19.34 / 36.66
CONQUER      7.76 / 22.49 / 35.17          9.24 / 28.67 / 41.98
MINUTE(SR)   8.17 / 23.38 / 37.93          9.59 / 28.96 / 45.23
MINUTE(HR)   10.70 / 29.37 / 45.09         12.60 / 33.72 / 50.23
Table 2. Comparisons of VCMR results with baselines on the DiDeMo testing set.

Model        IoU=0.5 R1 / R5 / R10     IoU=0.7 R1 / R5 / R10
XML          2.36 / - / 10.42          1.59 / - / 6.77
HERO         3.37 / 8.97 / 13.26       2.76 / 7.73 / 11.78
CONQUER      3.31 / 9.27 / 13.99       2.79 / 8.04 / 11.90
MINUTE(HR)   3.44 / 9.62 / 14.62       2.81 / 7.89 / 12.03
5.1. Datasets

TVR [16] is built on TV shows, whose videos consist of images and subtitles. TVR contains 17,435, 2,179, and 1,089 videos in the training, validation, and testing sets, respectively. The average length of the videos is 76.2 seconds, while the average length of the moments is 9.1 seconds.
DiDeMo [1] is a dataset whose videos come from the real world, with only images and no subtitles. DiDeMo contains 8,395, 1,065, and 1,004 training, validation, and testing videos, respectively. The average duration of the videos and moments is 54 seconds and 6.5 seconds, respectively.

5.2. Evaluation Metrics

We follow the metrics in [16] as the evaluation metrics of our experiments. For the VCMR task, the evaluation metric is R@K, IoU=p, which represents the percentage of queries for which at least one of the top-K retrieved moments has an Intersection over Union (IoU) with the ground truth exceeding p. The two sub-tasks are also evaluated. The metric of the SVMR task is the same as that of the VCMR task, but the evaluation is conducted only within the ground-truth video of each query. As for the VR task, the metric is R@K, which denotes the percentage of queries for which the correct video is among the top-K ranked videos.
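As a concrete reading of R@K, IoU=p, here is a small sketch; the exact tie-breaking and averaging conventions follow [16] and may differ from this simplified version.

```python
def temporal_iou(pred, gt):
    """IoU between two temporal segments given as (start, end) in seconds."""
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = max(pred[1], gt[1]) - min(pred[0], gt[0])
    return inter / union if union > 0 else 0.0

def recall_at_k(predictions, ground_truths, k, iou_threshold):
    """predictions[i]: ranked list of (video_id, start, end) moments for query i;
    ground_truths[i]: a single (video_id, start, end). Returns R@K, IoU=p."""
    hits = 0
    for preds, gt in zip(predictions, ground_truths):
        for video_id, start, end in preds[:k]:
            if video_id == gt[0] and temporal_iou((start, end), gt[1:]) >= iou_threshold:
                hits += 1
                break
    return hits / len(ground_truths)
```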
5.3. Implementation Details

Training We train the simple retriever for 100 epochs with a batch size of 256. As for the localizer, we sample 4 and 2 negative videos for each query from the top-100 ranked videos on TVR and DiDeMo, respectively, and train it for 10 epochs with a batch size of 32. Both the simple retriever and the localizer are trained with AdamW, with a learning rate of 0.0001 and a weight decay of 0.01, on a single 3090 GPU. For the HERO retriever, we retrain it with the InfoNCE loss on 8 3090 GPUs with the same settings as the original HERO [18].
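In code, the stated training hyper-parameters amount to roughly the following; parameter grouping, learning-rate scheduling, and any HERO-specific settings are omitted, and the configuration dictionaries are only an illustrative summary of the values above.

```python
import torch

# Settings stated above (TVR; DiDeMo uses 2 hard negatives instead of 4).
RETRIEVER_CFG = dict(epochs=100, batch_size=256)
LOCALIZER_CFG = dict(epochs=10, batch_size=32, num_hard_negatives=4)

def build_optimizer(model):
    # Both the retriever and the localizer use AdamW with these values.
    return torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=0.01)
```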
Table 3. Comparisons of VR results with baselines on the TVR validation set.

Model       R@1     R@5     R@10    R@100
XML         16.54   38.11   50.41   88.22
ReLoCLNet   22.13   45.85   57.25   90.21
HERO        29.01   52.82   63.07   89.91
SR          23.12   46.86   57.83   90.22
HR          32.88   55.62   65.35   91.26

Table 4. Comparisons of SVMR results with baselines on the TVR validation set.

Model        IoU=0.5 R1 / R10 / R100    IoU=0.7 R1 / R10 / R100
XML          31.43 / - / -              13.89 / - / -
ReLoCLNet    31.88 / - / -              15.04 / - / -
HERO         32.22 / 60.08 / 80.66      15.30 / 40.84 / 63.45
CONQUER      43.63 / - / -              22.84 / - / -
MINUTE(SR)   44.49 / 78.62 / 93.57      23.98 / 61.30 / 80.13
MINUTE(HR)   44.74 / 78.90 / 93.80      24.08 / 62.10 / 80.45
Inference The localizer localizes the target moment in the top-10 retrieved videos. The length of the predicted moments is limited to [1, 24] and [1, 7] for TVR and DiDeMo, respectively. We use non-maximum suppression (NMS) with an IoU of 0.7 to post-process the predicted moments.
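Below is a minimal sketch of the temporal NMS post-processing step; the 0.7 threshold matches the setting above, while the function shape and the greedy formulation are standard choices rather than a description of the released code.

```python
def temporal_nms(moments, iou_threshold=0.7):
    """moments: list of (start, end, score) triples for one video.
    Greedily keeps the highest-scoring moment among heavily overlapping ones."""
    def iou(a, b):
        inter = max(0.0, min(a[1], b[1]) - max(a[0], b[0]))
        union = max(a[1], b[1]) - min(a[0], b[0])
        return inter / union if union > 0 else 0.0

    kept = []
    for cand in sorted(moments, key=lambda m: m[2], reverse=True):
        if all(iou(cand, k) < iou_threshold for k in kept):
            kept.append(cand)
    return kept
```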
5.4. Comparison with Baselines

We compare the proposed model with baselines on the VCMR task, including four one-stage models, XML [16], ReLoCLNet [32], HAMMER [31], and HERO [18], and a two-stage model, CONQUER [15].

TVR As shown in Tab. 1, the proposed models outperform all baseline methods. Compared with the best previous method, CONQUER, which also uses HERO to address the VR task, our proposed model with the HERO retriever achieves a 36% improvement at R@1 on the testing set. We also report the results on the two sub-tasks in Tab. 3 and Tab. 4. For VR, the HERO retriever trained with the InfoNCE loss has better retrieval accuracy than the original HERO. For SVMR, our proposed models also achieve the best results. It is worth noting that the proposed model with the simple retriever outperforms CONQUER on VCMR even though its VR performance (R@1 23.12) is much worse than that of CONQUER (R@1 29.01). This is because moment prediction bias limits the performance of CONQUER.

DiDeMo We report the VCMR results on the DiDeMo testing set in Tab. 2. The performance of the proposed model is still better than the others. All methods perform worse than on TVR because the DiDeMo dataset was designed for temporal language grounding, so the difficulty of retrieving the correct video was not a design consideration. The queries in DiDeMo are not as specific as those in TVR (e.g., "a girl is playing ball"), which makes it hard to retrieve the correct video.

Figure 5. VCMR performance (R@1, IoU=0.7) of our model and CONQUER under different numbers of retrieved videos (1-10), where 'CONQUER*' denotes CONQUER with our retriever and scoring function.

Table 5. Performance on VCMR and SVMR (R@1, IoU=0.5/0.7) when removing two components of the localizer. MCM denotes multimodal clue mining, and MMT denotes the multimodal Transformer.

Model        VCMR 0.5 / 0.7    SVMR 0.5 / 0.7
MINUTE(HR)   19.22 / 10.70     44.74 / 24.08
w/o MCM      18.21 / 10.17     43.41 / 23.46
w/o MMT      16.71 / 8.66      40.50 / 20.97
5.5. Moment Prediction Bias

As shown in Fig. 5, when the number of retrieved videos increases, the performance of our model improves, but that of CONQUER does not change much, which indicates that moment prediction bias limits its performance.
This bias comes from the inconsistency of Shared-Norm between training and inference. Our prediction, based on the scoring function in Eq. (13), addresses this bias by ranking moments across multiple retrieved videos at inference. When we replace CONQUER's retriever and scoring function with ours, CONQUER* in Fig. 5 also mitigates the moment prediction bias, showing the effectiveness of the proposed prediction.
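To illustrate what ranking moments across multiple retrieved videos looks like, here is a rough sketch of inference with a shared normalization over all retrieved videos. The exact scoring function (Eq. (13)) combines the retrieval score with the span logits in a specific way defined earlier in the paper, so the simple additive combination below is only an assumption.

```python
import torch

def rank_moments_across_videos(video_scores, start_logits, end_logits,
                               max_len=24, top_n=100):
    """video_scores: (V,) retrieval scores of the retrieved videos;
    start_logits, end_logits: (V, L) span logits per video.
    Returns (video_idx, start, end, score) tuples ranked jointly over all videos."""
    V, L = start_logits.shape
    # Shared-Norm at inference: normalize span logits over all videos, not per video.
    p_st = torch.softmax(start_logits.reshape(-1), dim=0).reshape(V, L)
    p_ed = torch.softmax(end_logits.reshape(-1), dim=0).reshape(V, L)
    candidates = []
    for v in range(V):
        for s in range(L):
            for e in range(s, min(s + max_len, L)):
                score = video_scores[v] + torch.log(p_st[v, s]) + torch.log(p_ed[v, e])
                candidates.append((v, s, e, score.item()))
    candidates.sort(key=lambda m: m[3], reverse=True)
    return candidates[:top_n]
```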
5.6. Multimodal Clue Mining

We perform ablation studies on the effectiveness of the two components of the localizer in Tab. 5. When MCM is removed, the accuracy drops, which shows that discovering key content from images and subtitles as clues is helpful for moment localization. When only MCM is used (i.e., MMT is removed), the accuracy drops considerably, indicating that clues alone are not enough; fine-grained cross-modal interactions are also needed.

[Figure 6: two qualitative TVR cases comparing the ground truth with the moments predicted by MINUTE and CONQUER, for the queries "Amy and Bernadette spin around on their bar seats to face the other way." and "The bandleader announces Chandler and Monica and they walk into the room."] Figure 6. Two cases on TVR from the proposed model and CONQUER.

5.7. Case Study

We show two cases of VCMR in Fig. 6. In the first case, both models retrieve the correct video first, and the moment predicted by the proposed model is closer to the ground truth. The proposed model captures key images related to "they walk into the room" to help localize the moment, indicating the effectiveness of MCM in our model. In the second case, both models rank a wrong video first because the scenario in that video is similar to that in the correct video.
CONQUER fails to predict the correct moment from the correct video, because it places too much emphasis on the top-ranked videos. Our proposed model can still predict the correct moment, which verifies that our prediction improves the moment prediction bias.

6. Conclusion

In this paper, we propose MultI-video raNking with mUlTimodal cluE (MINUTE), a model that addresses two problems of two-stage methods for the video corpus moment retrieval task: moment prediction bias and latent key content. We first analyze the reason for the moment prediction bias, namely the inconsistency of Shared-Norm between training and inference; we then adopt Shared-Norm at inference and rank the moments of multiple videos based on our derived scoring function to alleviate this bias. As for latent key content, we propose a multimodal clue mining component to discover important content from the two modalities of a video as clues for better moment localization. Extensive experiments on two datasets, TVR and DiDeMo, show that our proposed model alleviates both problems and achieves a new state of the art on the video corpus moment retrieval task.

References

[1] Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In ICCV, 2017.
[2] Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. Neural machine translation by jointly learning to align and translate. In ICLR, 2015.
[3] Jingyuan Chen, Xinpeng Chen, Lin Ma, Zequn Jie, and Tat-Seng Chua. Temporally grounding natural sentence in video. In EMNLP, 2018.
[4] Long Chen, Chujie Lu, Siliang Tang, Jun Xiao, Dong Zhang, Chilie Tan, and Xiaolin Li. Rethinking the bottom-up framework for query-based video localization. In AAAI, 2020.
[5] Shaoxiang Chen and Yu-Gang Jiang. Semantic proposal for activity localization in videos via sentence query. In AAAI, 2019.
[6] Shizhe Chen, Yida Zhao, Qin Jin, and Qi Wu. Fine-grained video-text retrieval with hierarchical graph reasoning. In CVPR, 2020.
[7] Christopher Clark and Matt Gardner. Simple and effective multi-paragraph reading comprehension. In ACL, 2018.
[8] Jianfeng Dong, Xirong Li, Chaoxi Xu, Xun Yang, Gang Yang, Xun Wang, and Meng Wang. Dual encoding for video retrieval by text. TPAMI, 2021.
[9] Victor Escorcia, Mattia Soldan, Josef Sivic, Bernard Ghanem, and Bryan C. Russell. Temporal localization of moments in video collections with natural language. CoRR, 2019.
[10] Fartash Faghri, David J. Fleet, Jamie Ryan Kiros, and Sanja Fidler. VSE++: Improving visual-semantic embeddings with hard negatives. arXiv preprint arXiv:1707.05612, 2017.
[11] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. SlowFast networks for video recognition. In ICCV, 2019.
[12] Valentin Gabeur, Chen Sun, Karteek Alahari, and Cordelia Schmid. Multi-modal transformer for video retrieval. In ECCV, 2020.
[13] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. TALL: Temporal activity localization via language query. In ICCV, 2017.
[14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016.
[15] Zhijian Hou, Chong-Wah Ngo, and Wing Kwong Chan. CONQUER: Contextual query-aware ranking for video corpus moment retrieval. In ACM MM, 2021.
[16] Jie Lei, Licheng Yu, Tamara L. Berg, and Mohit Bansal. TVR: A large-scale dataset for video-subtitle moment retrieval. In ECCV, 2020.
[17] Kun Li, Dan Guo, and Meng Wang. Proposal-free video grounding with contextual pyramid network. In AAAI, 2021.
[18] Linjie Li, Yen-Chun Chen, Yu Cheng, Zhe Gan, Licheng Yu, and Jingjing Liu. HERO: Hierarchical encoder for video+language omni-representation pre-training. In EMNLP, 2020.
[19] Daizong Liu, Xiaoye Qu, Jianfeng Dong, Pan Zhou, Yu Cheng, Wei Wei, Zichuan Xu, and Yulai Xie. Context-aware biaffine localizing network for temporal sentence grounding. In CVPR, 2021.
[20] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. RoBERTa: A robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692, 2019.
[21] Mandela Patrick, Po-Yao Huang, Yuki Asano, Florian Metze, Alexander G. Hauptmann, Joao F. Henriques, and Andrea Vedaldi. Support-set bottlenecks for video-text representation learning. In ICLR, 2020.
[22] Anshumali Shrivastava and Ping Li. Asymmetric LSH (ALSH) for sublinear time maximum inner product search (MIPS). In NeurIPS, 2014.
[23] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018.
[24] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Łukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017.
[25] Xiaohan Wang, Linchao Zhu, and Yi Yang. T2VLAD: Global-local sequence alignment for text-video retrieval. In CVPR, 2021.
[26] Shaoning Xiao, Long Chen, Jian Shao, Yueting Zhuang, and Jun Xiao. Natural language video localization with learnable moment proposals. In EMNLP, 2021.
[27] Xun Yang, Jianfeng Dong, Yixin Cao, Xun Wang, Meng Wang, and Tat-Seng Chua. Tree-augmented cross-modal encoding for complex-query video retrieval. In SIGIR, 2020.
[28] Adams Wei Yu, David Dohan, Minh-Thang Luong, Rui Zhao, Kai Chen, Mohammad Norouzi, and Quoc V. Le. QANet: Combining local convolution with global self-attention for reading comprehension. In ICLR, 2018.
[29] Yitian Yuan, Tao Mei, and Wenwu Zhu. To find where you talk: Temporal sentence localization in video with attention based location regression. In AAAI, 2019.
[30] Runhao Zeng, Haoming Xu, Wenbing Huang, Peihao Chen, Mingkui Tan, and Chuang Gan. Dense regression network for video grounding. In CVPR, 2020.
[31] Bowen Zhang, Hexiang Hu, Joonseok Lee, Ming Zhao, Sheide Chammas, Vihan Jain, Eugene Ie, and Fei Sha. A hierarchical multi-modal encoder for moment localization in video corpus. arXiv preprint arXiv:2011.09046, 2020.
[32] Hao Zhang, Aixin Sun, Wei Jing, Guoshun Nan, Liangli Zhen, Joey Tianyi Zhou, and Rick Siow Mong Goh. Video corpus moment retrieval with contrastive learning. In SIGIR, 2021.
[33] Hao Zhang, Aixin Sun, Wei Jing, and Joey Tianyi Zhou. Span-based localizing network for natural language video localization. In ACL, 2020.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Span-based localizing network for natural language video lo- calization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' In ACL, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' 3 [34] Mingxing Zhang, Yang Yang, Xinghan Chen, Yanli Ji, Xing Xu, Jingjing Li, and Heng Tao Shen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' Multi-stage aggregated transformer network for temporal language localization in videos.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' In CVPR, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} +page_content=' 3 9' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/29FRT4oBgHgl3EQfnzcq/content/2301.13606v1.pdf'} diff --git a/2NE2T4oBgHgl3EQfNgYt/vector_store/index.faiss b/2NE2T4oBgHgl3EQfNgYt/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..d310d6e4c6d9784176f97b27c69f2afca0dce7a7 --- /dev/null +++ b/2NE2T4oBgHgl3EQfNgYt/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8dffc36cef7cdd3cc2c12514e9070899d8d716dad39f054de952dc7713f6baa +size 6750253 diff --git a/39AyT4oBgHgl3EQfcPct/content/2301.00277v1.pdf b/39AyT4oBgHgl3EQfcPct/content/2301.00277v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d683e77f59713ed76c23169487a03c0432559455 --- /dev/null +++ b/39AyT4oBgHgl3EQfcPct/content/2301.00277v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6647a25a9ba0414595f017775ebc1146276aeb6607f51108c6892ba8377dc4c3 +size 388877 diff --git a/39AyT4oBgHgl3EQfcPct/vector_store/index.pkl b/39AyT4oBgHgl3EQfcPct/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..ed761de69e9fdf14b618c6d95bad2486043a4622 --- /dev/null +++ b/39AyT4oBgHgl3EQfcPct/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd2f19689d50ece20291f64bc589849f6ef378756974799992afe5a6553994d1 +size 163190 diff --git a/39FAT4oBgHgl3EQfEhxj/vector_store/index.faiss b/39FAT4oBgHgl3EQfEhxj/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..6111daf1653fc16cb80b7182493f969377248d6d --- /dev/null +++ b/39FAT4oBgHgl3EQfEhxj/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:720fbd2fac88be16d7975ff17a069543a410127a6fb99b352a33ff969a2191f2 +size 2555949 diff --git a/39FAT4oBgHgl3EQfEhxj/vector_store/index.pkl b/39FAT4oBgHgl3EQfEhxj/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..c8b5f391631d47b150640090f87e2c1a575fdbc8 --- /dev/null +++ b/39FAT4oBgHgl3EQfEhxj/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a45ffb958c4d15140f3f3107fa33631a406785baa23412078b63d381e811a4e1 +size 103826 diff --git a/3dFLT4oBgHgl3EQfry9C/content/2301.12145v1.pdf b/3dFLT4oBgHgl3EQfry9C/content/2301.12145v1.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..849282bba7aed7aa53c55b0c0aa404a4c6cc4c5d --- /dev/null +++ b/3dFLT4oBgHgl3EQfry9C/content/2301.12145v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cfb0d8c8e5aca35be04e1bb0b14c51426a4c8aff006e7c8ee82148749f1cfda +size 322045 diff --git a/3dFLT4oBgHgl3EQfry9C/vector_store/index.faiss b/3dFLT4oBgHgl3EQfry9C/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..07a1dd7fd8b0b7af7006abb927e9a5bbe41bc0d3 --- /dev/null +++ b/3dFLT4oBgHgl3EQfry9C/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97298f48671b124e453e08fa1e578285c19f8241b807dff4b02afe674ced5d3a +size 3473453 diff --git a/3dFLT4oBgHgl3EQfry9C/vector_store/index.pkl b/3dFLT4oBgHgl3EQfry9C/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9269f2267bb7427ff8f141bd84db1a31382e53f2 --- /dev/null +++ b/3dFLT4oBgHgl3EQfry9C/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:390375edbdd70ecaa7775d509ababd7adbb9d47b90ca449798581b9395faa192 +size 124263 diff --git a/3tA0T4oBgHgl3EQfNP_j/content/tmp_files/2301.02145v1.pdf.txt b/3tA0T4oBgHgl3EQfNP_j/content/tmp_files/2301.02145v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..3149ebc4a23b518bb61367f60782bfad664c5b94 --- /dev/null +++ b/3tA0T4oBgHgl3EQfNP_j/content/tmp_files/2301.02145v1.pdf.txt @@ -0,0 +1,1565 @@ +JOURNAL OF LATEX CLASS FILES, VOL. 1, NO. 11, NOVEMBER 2022 +1 +Domain Generalization via Ensemble Stacking for Face Presentation +Attack Detection +Usman Muhammad1, Djamila Romaissa Beddiar1, and Mourad Oussalah1, Fellow, IEEE +1 Center for Machine Vision and Signal Analysis, University of Oulu, Finland +Face presentation attack detection (PAD) plays a pivotal role in securing face recognition systems against spoofing attacks. Although +great progress has been made in designing face PAD methods, developing a model that can generalize well to an unseen test domain +remains a significant challenge. Moreover, due to different types of spoofing attacks, creating a dataset with a sufficient number +of samples for training deep neural networks is a laborious task. This work addresses these challenges by creating synthetic data +and introducing a deep learning-based unified framework for improving the generalization ability of the face PAD. In particular, +synthetic data is generated by proposing a video distillation technique that blends a spatiotemporal warped image with a still image +based on alpha compositing. Since the proposed synthetic samples can be generated by increasing different alpha weights, we train +multiple classifiers by taking the advantage of a specific type of ensemble learning known as a stacked ensemble, where each such +classifier becomes an expert in its own domain but a non-expert to others. Motivated by this, a meta-classifier is employed to learn +from these experts collaboratively so that when developing an ensemble, they can leverage complementary information from each +other to better tackle or be more useful for an unseen target domain. Experimental results using half total error rates (HTERs) on +four PAD databases CASIA-MFSD (6.97%), Replay-Attack (33.49%), MSU-MFSD (4.02%), and OULU-NPU (10.91%)) demonstrate +the robustness of the method and open up new possibilities for advancing presentation attack detection using ensemble learning +with large-scale synthetic data. 
+Index Terms—Face Anti-Spoofing, Ensemble Learning, Deep Learning, Synthetic Data, LSTM. +I. Introduction +O +VER the past few decades, facial recognition (FR) +technology has been frequently used in numerous real- +world applications, such as mobile payments, access control, +immigration, education, surveillance, and healthcare [1]. The +accuracy of FR is no longer a major concern and the error +rate has dropped to 0.08%, according to tests conducted by +the National Institute of Standards and Technology (NIST) +[2]. Despite great success, a simple FR system might be +vulnerable to spoofing, known as a presentation attack. For +instance, print attacks, video replay, and 3D masks are the +most common attacks reported recently in the face anti- +spoofing domain [3], [4]. Thus, a number of hand-crafted and +deep representation methods have been proposed to protect FR +systems against presentation attacks [5], [6], [7], [8], [9], [10], +[11]. Many of them report promising performance in intra- +domain testing scenario. However, the performance remains +limited in cross-dataset testing scenario due to distributional +discrepancy between source domain and the target domain. +One of the major reasons that deep-learning-based models +are prone to overfitting due to the lack of availability of a +sufficient amount of training samples in the source domain. +Another possible reason might be that many face PAD methods +assume that training and testing data come from the same +target distribution. However, if a model was trained on cut +photo attack images, would it work on mask attack images? +What if the model trained only on replay attack images and +tested in warped photo attacks? Is it possible to deploy a model +that is trained using different illumination conditions and +background scenes under control lighting systems? Answers to +Manuscript received January 1, 2022; revised August 26, 2022. Correspond- +ing author: M. Usman (email: Muhammad.usman@oulu.fi) +all these questions depend on how a machine learning model +can deal with this domain shift problem. Thus, to alleviate +this issue, domain adaptation (DA) techniques are used to +leverage a source dataset and maintain a good accuracy on +the target dataset by using unlabeled target data. However, in +many applications, it is difficult to collect sufficient target data. +For instance, in face PAD, hackers are using different types +of spoofing attacks which makes it impractical to collect each +type of new attack sample in advance. +To overcome the domain shift problem, domain generaliza- +tion (DG) methods have been introduced to improve the gener- +alization [9], [10], [11]. However, the generalization capability +of PAD methods remains challenging because either the deep +feature-based methods or low-level feature-based methods may +not generalize well into new applications. Generalizability +refers to the performance difference of a model when the PAD +models are trained and tuned on one or multiple databases and +then tested on a completely unseen database. As shown in +Fig.1, the goal of domain generalization is to use the training +samples from one or several different source domains but +related domains (i.e., diverse training datasets) that perform +well when evaluated on a completely unseen target domain. 
+To improve the generalization, the majority of recent ap- +proaches in face PAD such as adversarial learning [12], +meta pattern learning [13], generative domain adaptation [14], +hypothesis verification [15], or cross-adversarial learning [16], +address the domain generalization issue by exploiting a com- +mon feature space from multiple source domains, but the +performance remains limited due to a substantial distribution +difference among source domains. For instance, research in +[17] relies on a shared feature space and assumes that it +would also be invariant to domain shift. This assumption has a +flaw because when the source domains become more diverse, +arXiv:2301.02145v1 [cs.CV] 5 Jan 2023 + +JOURNAL OF LATEX CLASS FILES, VOL. 1, NO. 11, NOVEMBER 2022 +2 +Fig. 1: The source domains are trained with diverse sets of +synthetic images where the meta-learner seeks complementary +information to generalize well to unseen target distribution. +learning a domain-invariant model becomes more difficult +[18]. For instance, instead of concentrating on some domain- +specific differentiation cues such as cut photo texture cues +available in the CASIA database, models can be benefited from +generalized feature space if more generalized cues are shared +by all source domains [11]. In addition, spoofing attacks have +been launched physically by malicious hackers (i.e., outside +the control of the biometric system). Therefore, building new +datasets to collect large samples of fake faces, especially for +each type of new attack remain infeasible in the face anti- +spoofing domain. Although the dominant approaches such as +Generative adversarial networks (GANs) [19], Bidirectional +GANs [20], the DCGAN [21], can be applied to mitigate +the gap between the target domain and the source domain by +generating synthetic faces, these models require careful tuning +of their parameters. +In this paper, rather than proposing a specific model +suited for the intra-database testing scenario, a novel unified +framework is introduced based on the idea of stacking-based +ensemble learning to improve the generalization of the face +PAD. We first generate different sets of synthetic training +samples and then train different sub-models on each of the +synthetic sets to specialize in their own domain. More specif- +ically, our goal is to understand the relationship between the +spatiotemporal artifacts that appear in synthetic samples. Con- +sequently, we train three sub-models in which we investigate +the characteristics of these spatiotemporal artifacts. By doing +this, we assume that sub-models that are trained on specific +source domains would be experts in domain-specific sources +but non-expert in all other source domains as well as the +target domain. Motivated by this, we train a meta-learner that +minimizes the cross-domain generalization error by combining +the input predictions of all experts (sub-models). Thus, our +key idea is to train the sub-models separately so that when +forming stacking, a meta-learner can leverage complementary +information in order to better approach the target domain. +To achieve our goal, we first introduce a video distillation +technique to generate synthetic samples. This is inspired by +our previous works [8], [22] that claim estimation of global +motion is important for face PAD. Specifically, a 2D image +morphing technique is proposed with a combination of a warp +and a cross dissolve. 
The main idea is to blend the encoded +spatiotemporal warped images with the still images using +alpha blending. By doing so, we generate multiple sets of +2D synthetic images with different alpha weights and expand +the training samples significantly. Several synthetic examples +are shown in Fig.2. We then train different recurrent neural +networks with each subset of synthetic data and use the +prediction of each subset to train the meta-classifier. Moreover, +the interpretability methods are employed to further assess how +robust is the model, by revealing that the most significant areas +for determining the deep learning model decision on the PAD +task are consistent with motion cues associated with the arti- +facts, i.e., screen sloping, hand movement, material reflection, +and expression changes. Overall, the main contributions of this +study are five-fold: +• A video distillation technique is proposed to train a +2D CNN on a still image, where “still” encodes both +appearance and temporal information from the video +sequence into a single RGB image. +• 2D image morphing is introduced to create large-scale +synthetic training samples that greatly promote the per- +formance of the face anti-spoofing model. +• Stacked recurrent neural networks are utilized to predict +spatiotemporal inconsistencies and then those predictions +are employed to form the deep architecture (meta-model). +• Techniques of interpretation are provided for exploring +the decisions made by the employed model. The model +revealed that the motion cues are the most important +factors for distinguishing whether an input image is +spoofed or not. +• Experiments on four benchmark datasets, consisting +of CASIA-MFSD, Replay-Attack, MSU-MFSD, and +OULU-NPU databases, show that our proposed method +is significantly superior on three databases in comparison +with other state-of-the-art generalization methods used +now. +The rest of this work is organized as follows. Section II +discusses the recent developments and related past works. +Section III explains all the steps of the proposed method. +Section IV shows the implementation details, ablation study, +and comparison against several public benchmark datasets. +Section V concludes the entire work and gives suggestions +for future research. +II. Literature Review +Over the past few years, face PAD methods have re- +ceived considerable attention from both academia and in- +dustry. In general, these methods can be roughly classified +into appearance-based methods and temporal-based methods. +Appearance-based methods: Traditional appearance-based +methods usually extract hand-crafted features such as LBP +[23] and SIFT [24] based on various appearance cues. The +authors in [5] claimed that color information is crucial and +luminance-chrominance color spaces improve the detection + +Source domain +Domain +1 +α = 0.5 +α=1.0 +α=1.5 +Target domain +Meta- +Domain +learner +2 +α=0.5 +α=1.0 +α=1 +Domain +3 +α = 0.5 +α=1.0JOURNAL OF LATEX CLASS FILES, VOL. 1, NO. 11, NOVEMBER 2022 +3 +Fig. 2: 2D synthetic samples from CASIA-MFSD. Left col- +umn: Video sequence used to generate synthetic samples. +Right column: Spatiotemporal encoded images morphed with +the still image using alpha values of 0.5 (Synt 1), 1.0 (Synt 2), +and 1.5 (Synt 3), respectively. These synthetic samples can be +used for ensemble stacking to significantly improve the face +anti-spoofing performance. +performance of face PAD in comparison to the RGB and +the gray-scale image representations. 
The multiscale filtering +approach proposed in [25] was found to be effective where +LBP-based multiscale features provide improved performance. +Wen et al [26] utilize image distortion analysis (IDA) and +develop an ensemble classifier, where multiple SVM classifiers +are implemented. In particular, the features are selected based +on specular reflection, blurriness, chromatic moment, and color +diversity to provide input to SVM classifiers. A component- +based coding framework is proposed to encode different +components of the face in [27]. To deploy secure face locking +on a smartphone, a method is developed based on extracting +color distortion, Moiré-pattern analysis, surface reflection, and +shape deformation [24]. The LBP features are combined with +the feature maps of a deep learning model to improve the +detection of face PAD in [28]. The authors show that the need +for large training samples in face PAD can be mitigated by +using convolutional feature maps. Moreover, a hybrid deep +learning method is introduced in [29] to encode appearance +information from two CNNs where the SVM classifier is used +to discriminate live and spoofed images. Although appearance- +based methods provide improved performance in an intra- +database testing scenario, the performance remains limited +when evaluated on a completely unseen testing domain. +Temporal-based methods: The study reported in [8] es- +timates global motion and amplifies motion cues such as +hand movements or head rotation where BiLSTM is used to +predict the motion. Since global estimation leaves the artifacts +such as black framing at the border of the encoded images +in [8], this issue was solved by using dense sampling with +similarity transformation [22]. Moreover, in order to encode +head movements, eye-blinking, and lip movements, a dynamic +mode decomposition (DMD) method is introduced to capture +the temporal cues from frame sequences [30]. Eulerian motion +magnification is used to magnify the facial expressions in [31]. +Then, local descriptors such as HOOF and LBP are utilized +to improve the classification performance. Photoplethysmogra- +phy (rPPG) signal was found to be crucial to improve the face +PAD performance [32]. A unified framework based on CNN- +BiLSTM is used to capture both appearance and temporal cues +in [29]. A study conducted in [33] shows that the spontaneous +blinking of a person provides an intrinsic detection cue to +improve live face detection. A dense optical flow scheme is +proposed to estimate the motion of two successive frames +in [34]. The authors claimed that real and attack videos +have different optical flow motion patterns which help to +improve the PAD performance. A 3D CNN model is employed +to capture both spatial and temporal information in [35]. +A combined CNN-RNN model is developed to capture the +auxiliary information (i.e., the depth map and rPPG signals) +for improving the detection performance [36]. However, when +the temporal and appearance-based methods are employed in +a cross-dataset scenario, the detection performance remains +vulnerable to degradation due to real-world variations (such +as user demographics, input cameras, and variations in illu- +mination). Therefore, domain generalization that aims to learn +from several source domains becomes significant while dealing +with presentation attack detection. +Deep Domain Generalization methods: Several deep do- +main generalization methods have been introduced to im- +prove the generalization ability of face PAD. 
For instance, +a domain adaptation method that generates pseudo-labeled +samples named cyclically disentangled feature translation net- +work (CDFTN) is proposed in [37]. Chuang et al proposed +to improve the generalization based on one-side triplet loss +[38]. A two-stream network is utilized to fuse the input RGB +image and meta-pattern learning was proposed to improve +the generalization [13]. A cross-adversarial training scheme +is proposed to improve the generalization by minimizing +the correlation among two sets of features [16]. The work +reported in [14], aims to learn a generalized feature space +by designing the target data to the source-domain style and +called Generative Domain Adaptation (GDA). A hypothesis +verification framework is proposed in [15] where two hy- +pothesis verification modules are utilized for improving the +generalization. A novel Shuffled Style Assembly Network +(SSAN) is introduced by aligning multiple source domains +into a stylized feature space and domain generalization was +improved by a contrastive learning strategy [39]. To select +common features space, adversarial learning is proposed and +aggregation of live faces is performed to achieve a generalized +feature space in [12]. However, there is no consensus that the +pre-defined distributions can be considered the optimal ones +for the feature space. Thus, we argue that a model can un- +derstand faces much better by simply aligning multiple source +domains based on the idea of collaborative ensemble learning. +In particular, the generalized feature space can automatically +capture spatiotemporal inconsistencies based on the knowledge +provided by multiple source domains. +III. The Proposed Method +Figure.3 illustrates the overall framework. Firstly, we +present a method to show how to synthesize training samples. + +Video sequence +Encoded clip +Synt 1 +Synt 2 +Synt 3JOURNAL OF LATEX CLASS FILES, VOL. 1, NO. 11, NOVEMBER 2022 +4 +Fig. 3: Flow chart of our proposed method. A video of length V is divided into non-overlapping segments of smaller length +v. For each segment, global motion is estimated and the stabilized sequence is accumulated to obtain a spatiotemporal warped +image. Then, the encoded spatiotemporal warped image is morphed with a still image (i.e., the first frame of the segment) by +using alpha compositing. Since different alpha values are used to create multiple synthetic images, we build multiple classifiers +on these synthetic images to form stacking-based ensemble learning for improving the generalization of face PAD. +The purpose of synthesis is to bring spatiotemporal artifacts +that can be used to train multiple individual models for +understanding the relationship between them. Secondly, a +unified CNN-RNN network is proposed due to the fact that +mainstream 2D CNN frameworks cannot deal with sequential +data (i.e., sequences to sequences). Then, model stacking is +designed in such a way that it can minimize the weakness and +maximize the strengths of every individual model based on +the meta-learner. Lastly, the model interpretation is provided to +investigate the contribution of synthetic data on which the deep +model mainly relies. Each step is explained in the following +sub-sections. +A. 2D Virtual Synthesis +To generate synthetic samples, a video V is equally divided +into P non-overlapping segments, i.e., V = {Sk}P +s=1, where +Sk is the k-th segment. The length of each segment is set +to be (w = 40) frames. 
For each segment, features are extracted from the fixed (first) and moving (second) image of the segment. In particular, the FAST feature detector [40] is utilized to detect interest points, and the FREAK descriptor [41] then extracts features at those points in both frames. Once salient image features are extracted, the next step is interest-point matching, for which the Hamming distance (HD) is used in our work. The inter-frame parameters are estimated throughout the whole length of the segment (with respect to the first frame) using a rigid (Euclidean) transformation. As the name suggests, a rigid transformation preserves distances and angles (i.e., the distance between two points remains the same). The rigid transformation matrix M is a 3×3 matrix. We find the 2D pixel coordinates in the Cartesian coordinate system by estimating the translation map from M. Let $[a, b, 1]^{T}$ denote the homogeneous coordinates in the moving image and $[a', b', 1]^{T}$ the coordinates in the fixed image; then

$$\begin{bmatrix} a' \\ b' \\ 1 \end{bmatrix} = \begin{bmatrix} d_{11} & d_{12} & d_{13} \\ d_{21} & d_{22} & d_{23} \\ d_{31} & d_{32} & d_{33} \end{bmatrix} \begin{bmatrix} a \\ b \\ 1 \end{bmatrix} \quad (1)$$

and the pixel shift can be calculated as

$$\begin{bmatrix} \Delta a \\ \Delta b \end{bmatrix} = \begin{bmatrix} a' - a \\ b' - b \end{bmatrix} \quad (2)$$

To eliminate false matching points and robustly estimate the geometric transformation between the frames, we use the M-estimator Sample Consensus (MSAC) algorithm [42] to detect outliers and remove false matches. To obtain warped images, we simply average the stabilized frame sequence using the following aggregation function:

$$e_{v} = \frac{1}{w} \sum_{k=1}^{w} e_{v_k}, \quad (3)$$

where w denotes the total number of selected frames in segment k of video V. Through this aggregation, the average over frames directly merges temporal information, while the image registration contributes the available spatial reference information. Figure 4 shows the effectiveness of the proposed video distillation scheme. The results demonstrate that the removal of global motion must be taken into account before the feature extraction step when developing a face PAD model.

Since our target is to predict the temporal inconsistencies, a synthetic image is generated by blending every spatiotemporal encoded image obtained from Eq. (3) into the first (still) image of its segment. By doing this, we make sure that the synthetic image never leaves the space of the human face (see Fig. 2). Thus, the proposed blending process involves two steps: 1) obtain a source image, i.e., a spatiotemporal encoded image from the video distillation technique, and 2) choose a target image, the first (still) image of each segment, to blend with the source image (usually known as cross dissolving). Assume that we blend the source image $P_1$ over the target image $P_2$ as:

$$P_{morph}(a, b) = \alpha P_1(a, b) + (1 - \alpha) P_2(a, b) \quad (4)$$

where $\alpha$ is the morphing weight ($0 < \alpha \le 1$). Thus, the synthetic image at location $P_{morph}(a, b)$ receives an $\alpha$ fraction from $P_1(a, b)$ and a $(1-\alpha)$ fraction from $P_2(a, b)$ [43].
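To make this pipeline concrete, the sketch below registers the frames of one segment to its first (still) frame, averages the stabilized frames (Eq. (3)), and alpha-blends the result with the still frame (Eq. (4)). It is only an approximation of the method described above: OpenCV's ORB detector and estimateAffinePartial2D with RANSAC are used as stand-ins for the FAST+FREAK features and the MSAC-based rigid fit, and the function name is illustrative.

```python
import cv2
import numpy as np

def distill_segment(frames, alpha=0.5):
    """Register each frame of a segment to its first (still) frame, average the
    stabilized frames into one spatiotemporal image, and alpha-blend the result
    with the still frame."""
    still = frames[0]
    fixed = cv2.cvtColor(still, cv2.COLOR_BGR2GRAY)
    h, w = fixed.shape
    detector = cv2.ORB_create(1000)                     # stand-in for FAST + FREAK
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    kp_f, des_f = detector.detectAndCompute(fixed, None)

    acc = still.astype(np.float32)
    for frame in frames[1:]:
        moving = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        kp_m, des_m = detector.detectAndCompute(moving, None)
        if des_m is None:                               # no keypoints found
            acc += frame.astype(np.float32)
            continue
        matches = matcher.match(des_m, des_f)           # Hamming-distance matching
        src = np.float32([kp_m[m.queryIdx].pt for m in matches])
        dst = np.float32([kp_f[m.trainIdx].pt for m in matches])
        # Robust rotation+translation(+scale) fit with RANSAC, standing in for
        # the rigid transformation and MSAC outlier removal of the paper.
        M, _ = cv2.estimateAffinePartial2D(src, dst, method=cv2.RANSAC)
        stabilized = cv2.warpAffine(frame, M, (w, h)) if M is not None else frame
        acc += stabilized.astype(np.float32)

    warped = acc / len(frames)                          # Eq. (3): average of stabilized frames
    blended = alpha * warped + (1.0 - alpha) * still.astype(np.float32)  # Eq. (4)
    return np.clip(blended, 0, 255).astype(np.uint8)
```

Running such a routine with alpha values of 0.5, 1.0, and 1.5 on 40-frame segments would correspond to the three synthetic subsets used later for ensemble stacking.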
It is worthwhile to mention that the proposed video distillation scheme is inspired by our previous works [8], [22], which estimate global motion. Benefiting from the video distillation nature of those methods, we extend them to generate synthetic samples by introducing a cross-dissolve. Moreover, we use the FREAK descriptor and a rigid transformation to estimate inter-frame motion, which significantly reduces the computational cost of the method (we discuss this further in Section IV).

Fig. 4: (a) We computed the mean of the raw video frames to visualize the global motion, which shows a great deal of distortion in the encoded image. (b) The proposed spatiotemporal encoded images after removing the global motion.

B. Recurrent Neural Network (RNN)
Deep learning methods based on 2D Convolutional Neural Networks (CNNs) have shown better performance than classical machine learning approaches [9], [6], [7]. However, mainstream 2D CNN frameworks focus on spatial information and thus lack the capacity to handle sequential data. Specifically, CNNs do not have a memory mechanism to capture temporal relations. Motivated by the fact that recurrent neural networks (RNNs) can deal with temporal information, we develop a unified CNN-RNN framework to encode complementary information between frames. In particular, a CNN is fine-tuned on the labeled dataset in the first stage. Then, the fine-tuned features are extracted from the pooling layer and used as input to train a Long Short-Term Memory (LSTM) [44] network.

The LSTM is the most popular RNN architecture and is capable of learning long-term dependencies. It is composed of a memory cell ($C_e$), an input gate ($i_e$), an output gate ($o_e$), and a forget gate ($g_e$). The input gate governs the information flow into the cell by multiplying the cell's non-linear transformation of the inputs, $m_e$. The output gate decides how much information from the cell is used to compute the output activation of the LSTM unit. The forget gate regulates the extent to which a value remains in the cell. The LSTM unit updates for time step e are:

$$\begin{bmatrix} g_e \\ i_e \\ m_e \\ o_e \end{bmatrix} = \begin{bmatrix} \sigma \\ \sigma \\ \tanh \\ \sigma \end{bmatrix} H \cdot [p_{e-1}, x_e] \quad (5)$$

$$C_e = g_e \odot C_{e-1} + m_e \odot i_e \quad (6)$$

$$p_e = \tanh(C_e) \odot o_e \quad (7)$$

where $x_e$ is the input at the current time step, $C_e$ is the current cell state, and $g_e$, $i_e$, and $m_e$ represent the forget gate activation, the input gate activation, and the candidate (input modulation) activation, respectively. $\sigma$ denotes the logistic sigmoid function and $\odot$ represents element-wise multiplication. A fully connected layer followed by softmax is used for detecting real and fake images.

C. Model Stacking
Ensemble learning has been supported by multiple approaches such as bagging, boosting, and stacking, which result in better generalization of the learning models [45]. In particular, stacking is an integration technique that combines the predictions of different weak models, wherein a meta-learning model is used to integrate the outputs of the base models [46]. One of the common approaches in stacked ensemble learning is to develop a set of T Tier-1 classifiers $S_1, S_2, S_3, ..., S_N$ based on cross-validation on the training sample [47].
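To make this arrangement concrete before the specific configurations are given (Tables I and II below), the following is a minimal PyTorch sketch of such a stacked ensemble: three LSTM/BiLSTM experts operate on CNN feature sequences, and an LSTM meta-learner consumes their stacked class scores. The feature dimension, the use of softmax scores as meta-features, and the single-step meta input are illustrative assumptions, not the exact training code.

```python
import torch
import torch.nn as nn

class RNNClassifier(nn.Module):
    """Sequence classifier on top of per-frame CNN features."""
    def __init__(self, feat_dim, hidden, bidirectional=False, n_classes=2):
        super().__init__()
        self.rnn = nn.LSTM(feat_dim, hidden, batch_first=True,
                           bidirectional=bidirectional)
        self.head = nn.Linear(hidden * (2 if bidirectional else 1), n_classes)

    def forward(self, x):                  # x: (batch, time, feat_dim)
        out, _ = self.rnn(x)
        return self.head(out[:, -1])       # logits from the last time step

feat_dim = 1920                            # assumed CNN pooling-feature size
base_models = [                            # one expert per synthetic subset
    RNNClassifier(feat_dim, hidden=500),                      # LSTM, 500 units
    RNNClassifier(feat_dim, hidden=20, bidirectional=True),   # BiLSTM, 20 units
    RNNClassifier(feat_dim, hidden=100),                      # LSTM, 100 units
]
meta_model = RNNClassifier(feat_dim=len(base_models) * 2, hidden=20)

def stack_predictions(models, x):
    """Concatenate the experts' softmax scores; the meta-learner sees them as a
    length-1 'sequence' of stacked meta-features."""
    with torch.no_grad():
        probs = [torch.softmax(m(x), dim=1) for m in models]
    return torch.cat(probs, dim=1).unsqueeze(1)    # (batch, 1, 3 * n_classes)

# Forward pass with random features (batch of 4 segments, 40 time steps each)
x = torch.randn(4, 40, feat_dim)
meta_logits = meta_model(stack_predictions(base_models, x))
print(meta_logits.shape)                   # torch.Size([4, 2])
```

In the actual pipeline, each expert would be trained on its own synthetic subset and the meta-learner on the accumulated validation-set predictions, as described next.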
Rather than focusing on the prediction of a single model, we train diverse RNN-based sub-models with different synthetic training samples to predict the temporal inconsistencies in the data. In particular, an LSTM [44] and a Bidirectional LSTM (BiLSTM) [48] with different hidden layer sizes are trained on three synthetic sets, where each sub-model works independently to specialize in its own source domain. To better understand the learning of the sub-models, Fig. 5 illustrates the proposed validation scheme, where each RNN is trained with k-1 folds, k-2 folds, and k-3 folds to get the most out of the stacking. By making experts on different training subsets, we push each model to concentrate on different aspects of the data (i.e., temporal inconsistencies): one model can focus on a certain type of feature using one subset of the synthetic data, while another model performs better on the others. We then combine the predictions from these experts (sub-models) by running another model called a meta-learner (meta-classifier). By doing this, the meta-learner helps to maximize the strengths of every individual model and reduce the generalization error.

Fig. 5: The proposed validation for ensemble learning.

TABLE I: BiLSTM architectures and parameters.
Parameter      | First Architecture | Second Architecture | Third Architecture
No. of layers  | 1                  | 1                   | 1
Layers type    | LSTM               | BiLSTM              | LSTM
No. of units   | 500                | 20                  | 100
Optimizer      | ADAM               | ADAM                | ADAM
Learning rate  | 0.0001             | 0.0001              | 0.001
Cost function  | cross entropy      | cross entropy       | cross entropy

TABLE II: Meta-model architecture and parameters.
No. of layers  | 1
Layers type    | LSTM
No. of units   | 20
Optimizer      | ADAM
Learning rate  | 0.0001
Cost function  | cross entropy

Table I shows the architectures and parameters of the base models, while Table II depicts the meta-model architecture. It is worth mentioning that we accumulate the outputs of the three base models on their validation sets as the new validation set for training the meta-model. This way, the meta-model makes the final prediction on the test set.

D. Interpretation of a deep neural network
Interpretation is essential for observing which learned patterns in the data are important, but there is no clear consensus on how interpretability should best be defined in the context of machine learning. Although explanation methods intend to make neural networks more trustworthy and interpretable, the question remains how particular features lead the deep model to a valuable prediction. For instance, the synthetic samples in our work are found to be more useful for training a deep model, and the resulting model shows better interpretability than the same model trained without synthetic samples. This is because the motion cues that are naturally available in the frame sequences are "easy to learn" for the model and play an important role in model optimization. Thus, interpretation is becoming increasingly popular and leads to useful and promising findings.
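The next paragraph lists the visualization tools used for this analysis. As one concrete example, a minimal, generic Grad-CAM [49] sketch for a DenseNet-201 backbone is given below; the chosen target layer, the torchvision weight identifier, and the random input are assumptions for illustration, and this is not the authors' visualization code.

```python
import torch
import torch.nn.functional as F
from torchvision import models

def grad_cam(model, image, target_layer, class_idx=None):
    """Minimal Grad-CAM: weight the target layer's activations by the spatially
    averaged gradients of the chosen class score, then ReLU and upsample."""
    acts, grads = [], []
    h1 = target_layer.register_forward_hook(lambda m, i, o: acts.append(o))
    h2 = target_layer.register_full_backward_hook(lambda m, gi, go: grads.append(go[0]))
    logits = model(image)                               # image: (1, 3, 224, 224)
    if class_idx is None:
        class_idx = int(logits.argmax(dim=1))
    model.zero_grad()
    logits[0, class_idx].backward()
    h1.remove()
    h2.remove()
    weights = grads[0].mean(dim=(2, 3), keepdim=True)   # global-average gradients
    cam = F.relu((weights * acts[0]).sum(dim=1, keepdim=True))
    cam = F.interpolate(cam, size=image.shape[-2:], mode="bilinear",
                        align_corners=False)
    return (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)

model = models.densenet201(weights="IMAGENET1K_V1").eval()   # recent torchvision API
image = torch.randn(1, 3, 224, 224)
heatmap = grad_cam(model, image, model.features.denseblock4)
print(heatmap.shape)                                    # torch.Size([1, 1, 224, 224])
```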
+In our work, Gradient-weighted class activation mapping +(denoted as Grad-CAM) [49], Occlusion sensitivity maps +(denoted as OCC-SEN) [50], Gradient Attribution map using +Guided Backpropagation (denoted as Grad-ATT) [51], and +locally interpretable model-agnostic explanations (denoted as +LIME) [52] are utilized to understand what patterns in data +are deemed important or make the contributions to the final +decision. In particular, this enables us to trust the behavior +of the developed deep learning model, and/or further tune +the model by observing its interpretations. In particular, we +extract visualization maps from pretrained DenseNet-201 [57] +convolutional neural network for all of the methods above +in our experiments. In Fig.6, we visualize diverse sets of +synthetic images from the CASIA datasets. The first four rows +show print attack images while the next four rows show replay +attack images. Each visualization method captures the class +discriminative region thanks to the proposed video distillation +and synthetic data generation scheme that force the network to +use more subtle cues for its correct classification. In particular, +the first row shows that the neurons in the deep convolutional +layers focus on the paper’s texture, and hand movement cues. +However, Grad-ATT [51] interpretation shows that the model +also takes background as context to make the prediction. +Surprisingly, this issue is eliminated by the proposed synthetic +data generation scheme where the second, third, and fourth +row shows that the model only considers motion cues, the +surface edges and barely touches the background context. +In case of a replay attack, the remaining rows show that +the tablet screen and hand movement provide discriminative +information for the model prediction. Since we cannot present +this for every image from the dataset, we observed that the +mouth information, eye blinking, or head rotation contribute +positively to distinguishing live and spoofed images. Thus, +interpretation from the above methods demonstrates that the +proposed learning model is focusing on the correct features of +the input data, and the model’s decision can be viewed in a +human-understandable way. Moreover, the proposed synthetic +data generation method provides informative RGB images and +helps the model to make the features of spoofed faces more +dispersed which allows a better class boundary to generalize +well to the target domain. +IV. Experimental analysis of using open datasets +To assess the effectiveness of the synthesized face im- +ages, four publicly available databases are used: OULU-NPU +database [58] (denoted as O), CASIA Face Anti-Spoofing +database (denoted as C) [59], Idiap Replay-Attack database +[60] (denoted as I), and MSU Mobile Face Spoofing database +[26] (denoted as M). The performance is evaluated in-terms +of Half Total Error Rate (HTER) (half of the summation of +false acceptance rate and false rejection rate) and Area Under +Curve on the target testing dataset. + +Fold 0 +Fold 1 +Fold 2 +Fold 3 +Train set +Synthetic set 1 +Synthetic set 2 +Synthetic set 3JOURNAL OF LATEX CLASS FILES, VOL. 1, NO. 11, NOVEMBER 2022 +7 +Fig. 6: Visualization of feature maps. The types of images are labelled in the first column. The second column shows the +original encoded and synthetic images. The third column illustrates the feature maps from Grad-CAM [49] while the fourth +column shows the feature maps from occlusion sensitivity maps [50]. 
Similarly, the fifth and sixth columns visualize the feature maps from the Gradient Attribution map using Guided Backpropagation [51] and from locally interpretable model-agnostic explanations [52], respectively. The last column shows the masked images obtained from the LIME predictions.

A. Implementation details
All images are resized to 224 × 224 according to the input requirement of the pretrained DenseNet-201 [57] architecture. The CNN model is fine-tuned using the Stochastic Gradient Descent (SGD) optimizer with a validation frequency of 30 and a mini-batch size of 32. The learning rate is set to 0.0001, and we do not use a fixed number of epochs because an early stopping function [61] halts training automatically to prevent overfitting.

During the ensemble learning stage, the CNN model is fine-tuned with the original encoded video clips and the three different synthetic sets separately. Then, the features from each fine-tuned model are used as input to train three diverse RNN models. In particular, the Adam optimizer is utilized with a validation frequency of 30. The learning rate is set to 0.0001, and the weights are initialized with the He initializer [62] for the first LSTM model (sub-model 1). We do not set a fixed number of epochs because an early stopping function [61] is used to prevent overfitting. For the second sub-model, a BiLSTM is trained with a hidden layer dimension of 20; the other parameters are kept the same as for sub-model 1. For the third sub-model, an LSTM is trained with a hidden layer dimension of 100 and a learning rate of 0.001. For the data synthesis method, we generate three synthetic sets, using alpha values of 0.5, 1.0, and 1.5 to expand the training images. To train the meta-learner, we use 80 epochs, a gradient threshold of 1, and a hidden layer dimension of 20. For reproducibility of our results, we keep the same parameter settings for the experiments on all the databases.
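As a compact summary of this two-stage setup, the sketch below shows an equivalent configuration in PyTorch; it is illustrative only (the momentum value and the stage-2 comment are assumptions), not the authors' training code.

```python
import torch
import torch.nn as nn
from torchvision import models, transforms

# Stage 1: fine-tune a pretrained DenseNet-201 on the encoded/synthetic images.
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),      # input size required by DenseNet-201
    transforms.ToTensor(),
])

model = models.densenet201(weights="IMAGENET1K_V1")             # recent torchvision API
model.classifier = nn.Linear(model.classifier.in_features, 2)   # live vs. spoof
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
criterion = nn.CrossEntropyLoss()

def train_one_epoch(loader):
    model.train()
    for images, labels in loader:       # mini-batch size 32 in the paper
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()

# Stage 2 (not shown): pooled features from the fine-tuned backbone feed the
# three LSTM/BiLSTM sub-models (Adam, lr 1e-4 or 1e-3, early stopping on a
# validation fold), and their stacked predictions train the LSTM meta-learner.
```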
TABLE III: Performance evaluation using the MSU-MFSD (M), CASIA-MFSD (C), Replay-Attack (I), and OULU-NPU (O) databases. Comparison results are obtained directly from the corresponding papers. Each protocol reports HTER(%) / AUC(%).

Method                    | O&C&I to M    | O&M&I to C    | O&C&M to I    | I&C&M to O
MADDG [11]                | 17.69 / 88.06 | 24.50 / 84.51 | 22.19 / 84.99 | 27.89 / 80.02
DAFL [10]                 | 14.58 / 92.58 | 17.41 / 90.12 | 15.13 / 95.76 | 14.72 / 93.08
SSDG-R [17]               | 07.38 / 97.17 | 10.44 / 95.94 | 11.71 / 96.59 | 15.61 / 91.54
DR-MD [9]                 | 17.02 / 90.10 | 19.68 / 87.43 | 20.87 / 86.72 | 25.02 / 81.47
MA-Net [6]                | 20.80 / -     | 25.60 / -     | 24.70 / -     | 26.30 / -
RFMetaFAS [7]             | 13.89 / 93.98 | 20.27 / 88.16 | 17.30 / 90.48 | 16.45 / 91.16
FAS-DR-BC(MT) [53]        | 11.67 / 93.09 | 18.44 / 89.67 | 11.93 / 94.95 | 16.23 / 91.18
ADL [12]                  | 05.00 / 97.58 | 10.00 / 96.85 | 12.07 / 94.68 | 13.45 / 94.43
ResNet-BiLSTM w/DS [3]    | 04.12 / 99.93 | 07.04 / 99.87 | 13.48 / 97.42 | 41.33 / 88.48
HFN + MP [13]             | 05.24 / 97.28 | 09.11 / 96.09 | 15.35 / 90.67 | 12.40 / 94.26
Cross-ADD [16]            | 11.64 / 95.27 | 17.51 / 89.98 | 15.08 / 91.92 | 14.27 / 93.04
ASGS [22]                 | 05.91 / 99.88 | 10.21 / 99.86 | 45.84 / 76.09 | 13.54 / 99.73
GDA [14]                  | 09.20 / 98.00 | 12.20 / 93.00 | 10.00 / 96.00 | 14.40 / 92.60
SSAN-R [39]               | 06.67 / 98.75 | 10.00 / 96.67 | 08.88 / 96.79 | 13.72 / 93.63
FG+HV [15]                | 09.17 / 96.92 | 12.47 / 93.47 | 16.29 / 90.11 | 13.58 / 93.55
Ensemble (CNN-RNN)        | 04.02 / 99.95 | 06.97 / 99.97 | 33.49 / 93.16 | 10.91 / 99.89

TABLE IV: Results of cross-dataset testing on limited source domains. The comparison results are obtained directly from the corresponding papers. Each protocol reports HTER(%) / AUC(%).

Method                | O&I to M    | M&I to C    | O&I to C    | O&M to I    | C&M to O
Supervised [54]       | 12.1 / 94.2 | 30.4 / 77.0 | 18.0 / 90.1 | 16.8 / 93.8 | 17.9 / 89.5
Mean-Teacher [55]     | 19.6 / 86.5 | 31.1 / 76.6 | 23.7 / 84.9 | 18.4 / 86.0 | 23.5 / 84.9
USDAN [56]            | 15.8 / 88.1 | 35.6 / 69.0 | 33.3 / 72.7 | 19.8 / 87.9 | 20.2 / 88.3
EPCR-labeled [54]     | 12.5 / 95.3 | 18.9 / 89.7 | 18.9 / 89.7 | 14.0 / 92.4 | 17.9 / 90.9
EPCR-unlabeled [54]   | 10.4 / 94.5 | 25.4 / 83.8 | 16.7 / 91.4 | 12.4 / 94.3 | 17.8 / 91.3
Ensemble (CNN-RNN)    | 07.8 / 98.5 | 17.1 / 94.3 | 12.5 / 97.1 | 15.1 / 93.1 | 14.7 / 93.4

B. Comparison against the state-of-the-art methods
To compare with recently introduced domain generalization methods, we conduct cross-dataset testing in which the model is trained on three source databases and evaluated on a completely unseen database using the leave-one-out (LOO) strategy. In particular, the testing sets of the source databases are used as a validation set for computing the equal error rate, and the HTER is then calculated directly on the target (unseen) dataset for a fair comparison with previous methods. As shown in Table III, the proposed ensemble learning achieves the best results on the three protocols O&C&I to M, O&M&I to C, and I&C&M to O, which demonstrates that the model can extract more generalized differentiation cues for face PAD. This is partly because recently proposed countermeasures concentrate on exploring a common feature space from multiple source domains, which only fits the data in the source domains [17]. In contrast to existing approaches based on adversarial learning [12], generative domain adaptation [14], and meta-learning [13], the proposed ensemble learning improves generalization by exploiting the relationship among multiple trained models, each an expert in its own source domain, while ensuring that the meta-learner can take complementary information from them to improve the generalization of the face PAD model.

C.
Experiment on Limited Source Domains. +We also consider the scenario of a limited source domain +by training the model on two source domains instead of three +as shown in Table IV. The model continues to achieve the +best performance on all the target domains. In particular, the +lowest HTER in four protocols and the highest AUC show +that limited source data does not degrade the generalization +capability of our network in a challenging testing scenario. +D. Ablation study +To verify the superiority of our proposed ensemble learning +and the contributions of each sub-model, we conduct exper- +iments for multi-source domains and limited-source domains +separately. Table V reports the numerical results for multi- +source domain settings. The baseline results represent the +performance of the ResNet-BiLSTM model without synthetic +data. These results are based on encoded spatiotemporal im- +ages obtained from the proposed video distillation scheme. +Sub-model 1 represents the results when one set of synthetic +images were added with spatiotemporal encoded images by +using the value of alpha (0.5). The numerical results of CNN +and CNN-RNN show that synthetic images start improving the +model’s performance on all datasets. In particular, the RNN +improves the performance significantly. Similarly, sub-model +2 represents the results with a different set of synthetic images +(i.e., alpha value was increased to 1.0). The proposed model +experienced a slight drop in performance for CNN predictions +but continues to improve the performance of RNN on M, I +and O. Moreover, when we further evaluate the performance + +JOURNAL OF LATEX CLASS FILES, VOL. 1, NO. 11, NOVEMBER 2022 +9 +TABLE V: Ablation study using cross-database evaluation. +O&C&I to M +O&M&I to C +O&C&M to I +I&C&M to O +Method +HTER(%) +AUC(%) +HTER(%) +AUC(%) +HTER(%) +AUC(%) +HTER(%) +AUC(%) +Baseline w/o synthetic data +19.02 +86.12 +19.52 +87.63 +31.66 +76.22 +35.44 +85.54 +Sub-model 1 (CNN) +18.11 +87.63 +18.66 +86.22 +29.00 +78.39 +36.55 +84.25 +Sub-model 1 (CNN-RNN) +09.97 +99.26 +07.31 +99.98 +36.87 +76.05 +19.90 +99.55 +Sub-model 2 (CNN) +18.82 +91.09 +22.20 +84.49 +35.63 +77.24 +34.01 +74.91 +Sub-model 2 (CNN-RNN) +08.40 +98.64 +10.14 +97.04 +34.44 +77.01 +12.41 +98.55 +Sub-model 3 (CNN) +17.21 +90.87 +23.22 +84.49 +37.33 +75.05 +33.45 +76.14 +Sub-model 3 (CNN-RNN) +06.05 +98.53 +06.08 +99.11 +39.33 +76.41 +14.40 +98.95 +Ensemble (CNN) +08.95 +97.79 +15.80 +95.47 +35.66 +90.19 +12.89 +98.94 +Ensemble (CNN-RNN) +04.02 +99.95 +06.97 +99.97 +33.49 +93.16 +10.91 +99.89 +TABLE VI: Ablation study with limited open-source databases using cross-database evaluation. +O&I to M +M&I to C +O&I to C +O&M to I +C&M to O +Method +HTER(%) AUC(%) HTER(%) AUC(%) HTER(%) AUC(%) HTER(%) AUC(%) HTER(%) AUC(%) +Sub-model 1 19.6 +86.5 +31.1 +76.6 +23.7 +84.9 +22.4 +84.0 +23.5 +84.9 +Sub-model 2 15.8 +88.1 +35.6 +69.0 +33.3 +72.7 +19.8 +87.9 +20.2 +88.3 +Sub-model 3 12.5 +95.3 +18.9 +89.7 +18.9 +89.7 +18.0 +92.4 +17.9 +90.9 +Ensemble +07.8 +98.5 +17.1 +94.3 +12.5 +97.1 +15.1 +93.1 +14.7 +93.4 +(a) +(b) +(c) +Fig. 7: The T-SNE visualization of feature distributions on cross-testing scenarios. (a) shows the feature distribution of the +original encoded video clips, (b) reflects the feature distribution of encoded video clips with a subset of synthetic samples, (c) +shows the feature distribution of meta-learner. 
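For reference, the HTER and AUC numbers reported in the tables above are computed from per-video scores, with the decision threshold fixed at the equal error rate of the validation scores as described in Section IV-B. A minimal helper is sketched below; the score convention (higher means live) and the variable names are assumptions for illustration.

```python
import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve

def eer_threshold(val_labels, val_scores):
    """Threshold at which the false-positive and false-negative rates are equal."""
    fpr, tpr, thr = roc_curve(val_labels, val_scores)
    idx = np.nanargmin(np.abs(fpr - (1.0 - tpr)))
    return thr[idx]

def hter(test_labels, test_scores, threshold):
    """Half Total Error Rate: mean of FAR (spoof accepted) and FRR (live rejected)."""
    test_labels = np.asarray(test_labels)
    pred_live = np.asarray(test_scores) >= threshold
    far = np.mean(pred_live[test_labels == 0])      # attacks classified as live
    frr = np.mean(~pred_live[test_labels == 1])     # live faces classified as attack
    return 0.5 * (far + frr)

# Example with random scores: label 1 = live, 0 = attack
rng = np.random.default_rng(0)
val_y, val_s = rng.integers(0, 2, 200), rng.random(200)
test_y, test_s = rng.integers(0, 2, 200), rng.random(200)
tau = eer_threshold(val_y, val_s)
print("HTER:", hter(test_y, test_s, tau), "AUC:", roc_auc_score(test_y, test_s))
```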
+TABLE VII: Average execution time (in seconds) +Dataset +Optical flow [63] ASGS method [22] TSS method [8] Ours +CASIA-FASD +1560 +1487 +1140 +1023 +REPLAY-ATTACK +1082 +1003 +780 +641 +on the third set of synthetic (α = 1.5) images, sub-model 3 +shows that further improvement can be achieved with synthetic +images. When we combine the prediction of these sub-models +and train the meta-learner, we achieve remarkable performance +on three datasets in comparison to state-of-the-art methods +[53],[6], [7],[8],[9],[10],[11]. The quantitative results indicate +that the ensemble learning guided by video distillation scheme +is beneficial to improve the performance for cross-domain face +PAD. +Analysis of limited source domains: In Table VI, we com- +pare the domain generalization ability of our proposed method +when limited source domain databases are accessible (i.e. only +two source datasets). The results indicate that the proposed +method is effective even in challenging cases. We hypothesize +that this improvement is due to the fact that encoded RGB +images with synthetic samples are almost as descriptive as the +entire video. +Comparisons of execution times: We analyze the execution +times of the proposed video distillation technique with the +previous global motion estimation methods [8], [22] and +optical flow[63]. Table VII reports the numerical results in the +total number of seconds used to generate the training samples +on two datasets. All these comparison results were reported +by using a MATLAB environment based on a workstation +with 3.5 GHz Intel Core i7-5930k and 64 GB RAM. One +can see that the proposed global motion estimation technique +is computationally less expensive than the previous motion +estimated methods reported recently in the literature. +E. Visualization and Analysis +To intuitively show the contribution of each sub-model, we +visualize the feature distribution of different features using +t-SNE [64], as illustrated in Fig. 7. The model is trained +on 0+C+I source domains without synthetic samples and +shows a trivial distribution in Fig. 7 (a) with an unclear +interface between live and spoofed samples. One can see +these overlapped areas can be easily misclassified and cause to +degrade the performance. After adding synthetic samples to the +sub-model, as represented in Fig. 7 (b), the feature distribution +improves and provides a relatively clear interface than the +baseline model, that is because the synthetic samples force + +Attack +RealAttack +RealAttack +RealJOURNAL OF LATEX CLASS FILES, VOL. 1, NO. 11, NOVEMBER 2022 +10 +(a) +(b) +(c) +(d) +Fig. 8: The Receiver Operating Characteristics (ROC) curves. (a) O&C&I to M, (b) O&M&I to C, (c) O&C&M to I, and (d) +I&C&M to O are developed by plotting the true positive rate (TPR) against the false positive rate (FPR). +the model to predict the spatiotemporal artifacts. Nonetheless, +when the meta-model is introduced, a well-structured and +compact distribution with a clear interface can be seen in +Fig 7 (c). Thus, our proposed ensemble learning shows good +generalizability on unseen target data. +In Fig.8, we visualize ROC curves to show how much the +model is capable of distinguishing real and attack classes. As +illustrated in Fig.8, the meta-model on all datasets achieves +more than 90% AUC which is a very impressive performance +on unseen testing sets. The ROC curve is plotted with TPR +against the FPR where FPR is on the x-axis and TPR is on the +y-axis. 
In particular, the meta-model (ensemble) drag curves +closer to the top-left corner indicate better performance. +V. Conclusions +In this paper, we show that ensemble learning represents an +interesting research direction for improving the generalization +of face PAD. In particular, the model is comprised of multiple +synthetic source domains, and each sub-model predicts the +spatiotemporal inconsistencies based on their similarity to each +training domain. Besides, a meta-learner is introduced to take +the complementary information from each sub-model. Based +on the experimental results on four benchmark datasets, the +proposed method exhibits better performance than a single +model trained only on original training data. Thus, using +ensemble stacking is shown to outperform the existing state- +of-the-art generalization methods. Finally, the interpretation of +the model shows that capturing the motion information is quite +helpful to improve the generalization ability of the proposed +method. Our future work will focus on the development of +robust motion estimation methods in end-to-end learning to +improve the generalization of face PAD. . +VI. Declaration of Competing Interest +The authors have no conflict of interest that could have +appeared to influence the work reported in this paper. +VII. Acknowledgments +This work is supported by the Center for Machine Vision +and Signal Analysis (CMVS) in the Faculty of Information +Technology and Electrical Engineering (ITEE) at University +of Oulu, Finland. +References +[1] J. H. Kim, J. Jang, Y. Kim, and D. Nan, “A structural topic model for +exploring user satisfaction with mobile payments,” Computers, Materials +and Continua, vol. 73, no. 2, pp. 3815–3826, 2022. +[2] P. J. Grother, M. L. Ngan, K. K. Hanaoka, et al., “Ongoing face +recognition vendor test (frvt) part 2: Identification,” 2018. +[3] U. Muhammad and M. Oussalah, “Face anti-spoofing from the perspec- +tive of data sampling,” Electronics Letters, 2022. +[4] U. Muhammad and M. Oussalah, “Self-supervised face presentation +attack detection with dynamic grayscale snippets,” arXiv preprint +arXiv:2208.13070, 2022. +[5] Z. Boulkenafet, J. Komulainen, and A. Hadid, “Face spoofing detec- +tion using colour texture analysis,” IEEE Transactions on Information +Forensics and Security, vol. 11, no. 8, pp. 1818–1830, 2016. +[6] A. Liu, Z. Tan, J. Wan, Y. Liang, Z. Lei, G. Guo, and S. Z. Li, “Face anti- +spoofing via adversarial cross-modality translation,” IEEE Transactions +on Information Forensics and Security, vol. 16, pp. 2759–2772, 2021. +[7] R. Shao, X. Lan, and P. C. Yuen, “Regularized fine-grained meta face +anti-spoofing,” in Proceedings of the AAAI Conference on Artificial +Intelligence, vol. 34, pp. 11974–11981, 2020. +[8] U. Muhammad, Z. Yu, and J. Komulainen, “Self-supervised 2d face +presentation attack detection via temporal sequence sampling,” Pattern +Recognition Letters, 2022. +[9] G. Wang, H. Han, S. Shan, and X. Chen, “Cross-domain face pre- +sentation attack detection via multi-domain disentangled representation +learning,” in Proceedings of the IEEE/CVF Conference on Computer +Vision and Pattern Recognition, pp. 6678–6687, 2020. +[10] S. Saha, W. Xu, M. Kanakis, S. Georgoulis, Y. Chen, D. P. Paudel, and +L. Van Gool, “Domain agnostic feature learning for image and video +based face anti-spoofing,” in Proceedings of the IEEE/CVF Conference +on Computer Vision and Pattern Recognition Workshops, pp. 802–803, +2020. +[11] R. Shao, X. Lan, J. Li, and P. C. 
Yuen, “Multi-adversarial discriminative +deep domain generalization for face presentation attack detection,” in +Proceedings of the IEEE/CVF Conference on Computer Vision and +Pattern Recognition, pp. 10023–10031, 2019. +[12] M. Liu, J. Mu, Z. Yu, K. Ruan, B. Shu, and J. Yang, “Adversarial +learning and decomposition-based domain generalization for face anti- +spoofing,” Pattern Recognition Letters, vol. 155, pp. 171–177, 2022. +[13] R. Cai, Z. Li, R. Wan, H. Li, Y. Hu, and A. C. Kot, “Learning +meta pattern for face anti-spoofing,” IEEE Transactions on Information +Forensics and Security, vol. 17, pp. 1201–1213, 2022. +[14] Q. Zhou, K.-Y. Zhang, T. Yao, R. Yi, K. Sheng, S. Ding, and L. Ma, +“Generative domain adaptation for face anti-spoofing,” in European +Conference on Computer Vision, pp. 335–356, Springer, 2022. +[15] S. Liu, S. Lu, H. Xu, J. Yang, S. Ding, and L. Ma, “Feature generation +and hypothesis verification for reliable face anti-spoofing,” in Proceed- +ings of the AAAI Conference on Artificial Intelligence, vol. 36, pp. 1782– +1791, 2022. +[16] H. Huang, Y. Xiang, G. Yang, L. Lv, X. Li, Z. Weng, and Y. Fu, +“Generalized face anti-spoofing via cross-adversarial disentanglement +with mixing augmentation,” in ICASSP 2022-2022 IEEE International +Conference on Acoustics, Speech and Signal Processing (ICASSP), +pp. 2939–2943, IEEE, 2022. +[17] Y. Jia, J. Zhang, S. Shan, and X. Chen, “Single-side domain generaliza- +tion for face anti-spoofing,” in Proceedings of the IEEE/CVF Conference +on Computer Vision and Pattern Recognition, pp. 8484–8493, 2020. + +0.8 +rate +positive +0.6 +True +0.4 +Sub-model 1 +Sub-model 2 +Sub-model 3 +0.2 +Ensemblelearning +0 +0 +0.2 +0.4 +0.6 +0.8 +1 +Falsepositive rate0.8 +rate +positive +0.6 +Sub-model 1 +0.4 +Sub-model 2 +Sub-model 3 +0.2 +Ensemblelearning +0 +0 +0.2 +0.4 +0.6 +0.8 +Falsepositiverate0.8 +rate +positive +0.6 +0.4 +Sub-model 1 +Sub-model 2 +Sub-model 3 +0.2 +Ensemblelearning +0 +0 +0.2 +0.4 +0.6 +0.8 +Falsepositiverate0.8 +rate +positive +0.6 +True +0.4 +Sub-model 1 +Sub-model 2 +Sub-model 3 +0.2 +Ensemblelearning +0 +0 +0.2 +0.4 +0.6 +0.8 +FalsepositiverateJOURNAL OF LATEX CLASS FILES, VOL. 1, NO. 11, NOVEMBER 2022 +11 +[18] K. Zhou, Y. Yang, Y. Qiao, and T. Xiang, “Domain adaptive ensemble +learning,” IEEE Transactions on Image Processing, vol. 30, pp. 8008– +8018, 2021. +[19] I. J. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, +S. Ozair, A. Courville, and Y. Bengio, “Generative adversarial networks,” +arXiv preprint arXiv:1406.2661, 2014. +[20] J. Donahue, P. Krähenbühl, and T. Darrell, “Adversarial feature learn- +ing,” arXiv preprint arXiv:1605.09782, 2016. +[21] A. Radford, L. Metz, and S. Chintala, “Unsupervised representation +learning with deep convolutional generative adversarial networks,” arXiv +preprint arXiv:1511.06434, 2015. +[22] U. Muhammad, J. Zhang, L. Liu, and M. Oussalah, “An adaptive +spatio-temporal global sampling for presentation attack detection,” IEEE +Transactions on Circuits and Systems II: Express Briefs, 2022. +[23] T. d. Freitas Pereira, A. Anjos, J. M. D. Martino, and S. Marcel, +“Lbp- top based countermeasure against face spoofing attacks,” in Asian +Conference on Computer Vision, pp. 121–132, Springer, 2012. +[24] K. Patel, H. Han, and A. K. Jain, “Secure face unlock: Spoof detection on +smartphones,” IEEE transactions on information forensics and security, +vol. 11, no. 10, pp. 2268–2283, 2016. +[25] Z. Boulkenafet, J. Komulainen, X. Feng, and A. 
[26] D. Wen, H. Han, and A. K. Jain, "Face spoof detection with image distortion analysis," IEEE Transactions on Information Forensics and Security, vol. 10, no. 4, pp. 746-761, 2015.
[27] J. Yang, Z. Lei, S. Liao, and S. Z. Li, "Face liveness detection with component dependent descriptor," in 2013 International Conference on Biometrics (ICB), pp. 1-6, IEEE, 2013.
[28] L. Li and X. Feng, "Face anti-spoofing via deep local binary pattern," in Deep Learning in Object Detection and Recognition, pp. 91-111, Springer, 2019.
[29] U. Muhammad, T. Holmberg, W. C. de Melo, and A. Hadid, "Face anti-spoofing via sample learning based recurrent neural network (RNN)," in BMVC, p. 113, 2019.
[30] S. Tirunagari, N. Poh, D. Windridge, A. Iorliam, N. Suki, and A. T. Ho, "Detection of face spoofing using visual dynamics," IEEE Transactions on Information Forensics and Security, vol. 10, no. 4, pp. 762-777, 2015.
[31] S. Bharadwaj, T. I. Dhamecha, M. Vatsa, and R. Singh, "Computationally efficient face spoofing detection with motion magnification," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 105-110, 2013.
[32] S.-Q. Liu, X. Lan, and P. C. Yuen, "Multi-channel remote photoplethysmography correspondence feature for 3D mask face presentation attack detection," IEEE Transactions on Information Forensics and Security, vol. 16, pp. 2683-2696, 2021.
[33] L. Sun, G. Pan, Z. Wu, and S. Lao, "Blinking-based live face detection using conditional random fields," in International Conference on Biometrics, pp. 252-260, Springer, 2007.
[34] W. Yin, Y. Ming, and L. Tian, "A face anti-spoofing method based on optical flow field," in 2016 IEEE 13th International Conference on Signal Processing (ICSP), pp. 1333-1337, IEEE, 2016.
[35] H. Li, P. He, S. Wang, A. Rocha, X. Jiang, and A. C. Kot, "Learning generalized deep feature representation for face anti-spoofing," IEEE Transactions on Information Forensics and Security, vol. 13, no. 10, pp. 2639-2652, 2018.
[36] Y. Liu, A. Jourabloo, and X. Liu, "Learning deep models for face anti-spoofing: Binary or auxiliary supervision," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 389-398, 2018.
[37] H. Yue, K. Wang, G. Zhang, H. Feng, J. Han, E. Ding, and J. Wang, "Cyclically disentangled feature translation for face anti-spoofing," arXiv preprint arXiv:2212.03651, 2022.
[38] C.-C. Chuang, C.-Y. Wang, and S.-H. Lai, "Generalized face anti-spoofing via multi-task learning and one-side meta triplet loss," arXiv preprint arXiv:2211.15955, 2022.
[39] Z. Wang, Z. Wang, Z. Yu, W. Deng, J. Li, T. Gao, and Z. Wang, "Domain generalization via shuffled style assembly for face anti-spoofing," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4123-4133, 2022.
[40] E. Rosten and T. Drummond, "Fusing points and lines for high performance tracking," in Tenth IEEE International Conference on Computer Vision (ICCV'05) Volume 1, vol. 2, pp. 1508-1515, IEEE, 2005.
[41] A. Alahi, R. Ortiz, and P. Vandergheynst, "FREAK: Fast retina keypoint," in 2012 IEEE Conference on Computer Vision and Pattern Recognition, pp. 510-517, IEEE, 2012.
[42] P. H. Torr and A. Zisserman, "Robust parameterization and computation of the trifocal tensor," Image and Vision Computing, vol. 15, no. 8, pp. 591-605, 1997.
[43] B. Kamgar-Parsi, W. Lawson, and B. Kamgar-Parsi, "Toward development of a face recognition system for watchlist surveillance," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 33, no. 10, pp. 1925-1937, 2011.
[44] S. Hochreiter and J. Schmidhuber, "Long short-term memory," Neural Computation, vol. 9, no. 8, pp. 1735-1780, 1997.
[45] S. Fatemifar, M. Awais, A. Akbari, and J. Kittler, "A stacking ensemble for anomaly based client-specific face spoofing detection," in 2020 IEEE International Conference on Image Processing (ICIP), pp. 1371-1375, IEEE, 2020.
[46] M. Ganaie, M. Hu, et al., "Ensemble deep learning: A review," arXiv preprint arXiv:2104.02395, 2021.
[47] R. Polikar, "Ensemble learning," in Ensemble Machine Learning, pp. 1-34, Springer, 2012.
[48] M. Schuster and K. K. Paliwal, "Bidirectional recurrent neural networks," IEEE Transactions on Signal Processing, vol. 45, no. 11, pp. 2673-2681, 1997.
[49] R. R. Selvaraju, M. Cogswell, A. Das, R. Vedantam, D. Parikh, and D. Batra, "Grad-CAM: Visual explanations from deep networks via gradient-based localization," in Proceedings of the IEEE International Conference on Computer Vision, pp. 618-626, 2017.
[50] M. D. Zeiler and R. Fergus, "Visualizing and understanding convolutional networks," in European Conference on Computer Vision, pp. 818-833, Springer, 2014.
[51] J. T. Springenberg, A. Dosovitskiy, T. Brox, and M. Riedmiller, "Striving for simplicity: The all convolutional net," arXiv preprint arXiv:1412.6806, 2014.
[52] M. T. Ribeiro, S. Singh, and C. Guestrin, ""Why should I trust you?" Explaining the predictions of any classifier," in Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 1135-1144, 2016.
[53] Y. Qin, Z. Yu, L. Yan, Z. Wang, C. Zhao, and Z. Lei, "Meta-teacher for face anti-spoofing," IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021.
[54] Z. Wang, Z. Yu, X. Wang, Y. Qin, J. Li, C. Zhao, Z. Lei, X. Liu, S. Li, and Z. Wang, "Consistency regularization for deep face anti-spoofing," arXiv preprint arXiv:2111.12320, 2021.
[55] A. Tarvainen and H. Valpola, "Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results," Advances in Neural Information Processing Systems, vol. 30, 2017.
[56] Y. Jia, J. Zhang, S. Shan, and X. Chen, "Unified unsupervised and semi-supervised domain adaptation network for cross-scenario face anti-spoofing," Pattern Recognition, vol. 115, p. 107888, 2021.
[57] G. Huang, Z. Liu, L. Van Der Maaten, and K. Q. Weinberger, "Densely connected convolutional networks," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4700-4708, 2017.
[58] Z. Boulkenafet, J. Komulainen, L. Li, X. Feng, and A. Hadid, "OULU-NPU: A mobile face presentation attack database with real-world variations," in 2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017), pp. 612-618, IEEE, 2017.
[59] Z. Zhang, J. Yan, S. Liu, Z. Lei, D. Yi, and S. Z. Li, "A face anti-spoofing database with diverse attacks," in 2012 5th IAPR International Conference on Biometrics (ICB), pp. 26-31, IEEE, 2012.
[60] I. Chingovska, A. Anjos, and S. Marcel, "On the effectiveness of local binary patterns in face anti-spoofing," in 2012 BIOSIG - Proceedings of the International Conference of the Biometrics Special Interest Group (BIOSIG), pp. 1-7, IEEE, 2012.
[61] L. Prechelt, "Early stopping - but when?," in Neural Networks: Tricks of the Trade, pp. 55-69, Springer, 1998.
[62] K. He, X. Zhang, S. Ren, and J. Sun, "Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification," in Proceedings of the IEEE International Conference on Computer Vision, pp. 1026-1034, 2015.
[63] B. K. Horn and B. G. Schunck, "Determining optical flow," Artificial Intelligence, vol. 17, no. 1-3, pp. 185-203, 1981.
[64] L. Van der Maaten and G. Hinton, "Visualizing data using t-SNE," Journal of Machine Learning Research, vol. 9, no. 11, 2008.

diff --git a/3tA0T4oBgHgl3EQfNP_j/content/tmp_files/load_file.txt b/3tA0T4oBgHgl3EQfNP_j/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bace8aa3a18fe447e5181c95cda8dd010858558d
--- /dev/null
+++ b/3tA0T4oBgHgl3EQfNP_j/content/tmp_files/load_file.txt
@@ -0,0 +1,1187 @@
filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf,len=1186

Domain Generalization via Ensemble Stacking for Face Presentation Attack Detection
Usman Muhammad, Djamila Romaissa Beddiar, and Mourad Oussalah, Fellow, IEEE
Center for Machine Vision and Signal Analysis, University of Oulu, Finland

Face presentation attack detection (PAD) plays a pivotal role in securing face recognition systems against spoofing attacks. Although great progress has been made in designing face PAD methods, developing a model that generalizes well to an unseen test domain remains a significant challenge. Moreover, because of the different types of spoofing attacks, creating a dataset with a sufficient number of samples for training deep neural networks is a laborious task. This work addresses these challenges by creating synthetic data and introducing a deep learning-based unified framework for improving the generalization ability of face PAD. In particular, synthetic data is generated with a proposed video distillation technique that blends a spatiotemporal warped image with a still image based on alpha compositing. Since the proposed synthetic samples can be generated with different alpha weights, we train multiple classifiers by taking advantage of a specific type of ensemble learning known as a stacked ensemble, where each such classifier becomes an expert in its own domain but a non-expert in the others.
Motivated by this, a meta-classifier is employed to learn from these experts collaboratively so that, when forming the ensemble, they can leverage complementary information from each other to better handle an unseen target domain. Experimental results using half total error rates (HTERs) on four PAD databases, CASIA-MFSD (6.97%), Replay-Attack (33.49%), MSU-MFSD (4.02%), and OULU-NPU (10.91%), demonstrate the robustness of the method and open up new possibilities for advancing presentation attack detection using ensemble learning with large-scale synthetic data.

Index Terms—Face Anti-Spoofing, Ensemble Learning, Deep Learning, Synthetic Data, LSTM.

I. Introduction
Over the past few decades, facial recognition (FR) technology has been frequently used in numerous real-world applications, such as mobile payments, access control, immigration, education, surveillance, and healthcare [1]. The accuracy of FR is no longer a major concern, and the error rate has dropped to 0.08% according to tests conducted by the National Institute of Standards and Technology (NIST) [2]. Despite this success, a simple FR system may be vulnerable to spoofing, known as a presentation attack. For instance, print attacks, video replays, and 3D masks are the most common attacks reported recently in the face anti-spoofing domain [3], [4]. Thus, a number of hand-crafted and deep representation methods have been proposed to protect FR systems against presentation attacks [5], [6], [7], [8], [9], [10], [11].
Many of them report promising performance in the intra-domain testing scenario. However, the performance remains limited in the cross-dataset testing scenario due to the distributional discrepancy between the source domain and the target domain. One major reason is that deep-learning-based models are prone to overfitting because a sufficient amount of training samples is not available in the source domain. Another possible reason is that many face PAD methods assume that training and testing data come from the same target distribution. However, if a model was trained on cut photo attack images, would it work on mask attack images? What if the model was trained only on replay attack images and tested on warped photo attacks? Is it possible to deploy a model that is trained with different illumination conditions and background scenes under controlled lighting systems? Answers to all these questions depend on how a machine learning model can deal with this domain shift problem. Thus, to alleviate this issue, domain adaptation (DA) techniques are used to leverage a source dataset and maintain good accuracy on the target dataset by using unlabeled target data. However, in many applications, it is difficult to collect sufficient target data.

(Manuscript received January 1, 2022; revised August 26, 2022. Corresponding author: M. Usman, e-mail: Muhammad.usman@oulu.fi.)
For instance, in face PAD, hackers use different types of spoofing attacks, which makes it impractical to collect each type of new attack sample in advance. To overcome the domain shift problem, domain generalization (DG) methods have been introduced to improve generalization [9], [10], [11]. However, the generalization capability of PAD methods remains challenging because neither deep feature-based methods nor low-level feature-based methods may generalize well to new applications. Generalizability refers to the performance difference of a model when the PAD model is trained and tuned on one or multiple databases and then tested on a completely unseen database. As shown in Fig. 1, the goal of domain generalization is to use training samples from one or several different but related source domains (i.e., diverse training datasets) and perform well when evaluated on a completely unseen target domain. To improve generalization, the majority of recent approaches in face PAD, such as adversarial learning [12], meta pattern learning [13], generative domain adaptation [14], hypothesis verification [15], or cross-adversarial learning [16], address the domain generalization issue by exploiting a common feature space from multiple source domains, but the performance remains limited due to the substantial distribution difference among source domains. For instance, the research in [17] relies on a shared feature space and assumes that it would also be invariant to domain shift. This assumption has a flaw because, when the source domains become more diverse, learning a domain-invariant model becomes more difficult [18].
Fig. 1: The source domains are trained with diverse sets of synthetic images, where the meta-learner seeks complementary information to generalize well to an unseen target distribution.

For instance, instead of concentrating on domain-specific differentiation cues, such as the cut photo texture cues available in the CASIA database, models can benefit from a generalized feature space if more general cues are shared by all source domains [11]. In addition, spoofing attacks are launched physically by malicious hackers (i.e., outside the control of the biometric system). Therefore, building new datasets to collect large numbers of fake face samples, especially for each type of new attack, remains infeasible in the face anti-spoofing domain. Although dominant approaches such as generative adversarial networks (GANs) [19], Bidirectional GANs [20], and the DCGAN [21] can be applied to mitigate the gap between the target domain and the source domain by generating synthetic faces, these models require careful tuning of their parameters. In this paper, rather than proposing a specific model suited to the intra-database testing scenario, a novel unified framework is introduced based on the idea of stacking-based ensemble learning to improve the generalization of face PAD. We first generate different sets of synthetic training samples and then train different sub-models on each of the synthetic sets so that each specializes in its own domain. More specifically, our goal is to understand the relationship between the spatiotemporal artifacts that appear in the synthetic samples.
Consequently, we train three sub-models and investigate the characteristics of these spatiotemporal artifacts. By doing this, we assume that sub-models trained on specific source domains are experts in their own domain-specific sources but non-experts in all other source domains as well as in the target domain. Motivated by this, we train a meta-learner that minimizes the cross-domain generalization error by combining the predictions of all experts (sub-models). Thus, our key idea is to train the sub-models separately so that, when forming the stack, a meta-learner can leverage their complementary information in order to better approach the target domain; a minimal sketch of this stacking idea is given below.
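As a rough illustration of how such a meta-learner can be fitted on top of the sub-models' outputs, the following sketch trains a logistic-regression meta-classifier over stacked prediction scores. The array shapes, variable names, and the choice of logistic regression are our own assumptions for illustration, not the exact configuration used in the paper.

```python
# Hypothetical stacking sketch: a meta-learner fitted on sub-model scores.
import numpy as np
from sklearn.linear_model import LogisticRegression

def fit_meta_learner(submodel_scores, labels):
    """submodel_scores: (n_samples, n_submodels) live/spoof scores produced by
    sub-models trained on different synthetic source domains."""
    meta = LogisticRegression()
    meta.fit(submodel_scores, labels)      # learn how to weight each expert
    return meta

def ensemble_predict(meta, submodel_scores):
    # combine the experts' predictions into a single spoofing probability
    return meta.predict_proba(submodel_scores)[:, 1]

# toy usage: three sub-models scoring ten validation clips
scores = np.random.rand(10, 3)
labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1, 0])   # 1 = attack, 0 = live
meta = fit_meta_learner(scores, labels)
print(ensemble_predict(meta, scores))
```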
To achieve our goal, we first introduce a video distillation technique to generate synthetic samples. It is inspired by our previous works [8], [22], which argue that estimating global motion is important for face PAD. Specifically, a 2D image morphing technique is proposed that combines a warp with a cross dissolve. The main idea is to blend the encoded spatiotemporal warped images with the still images using alpha blending. By doing so, we generate multiple sets of 2D synthetic images with different alpha weights and expand the training samples significantly. Several synthetic examples are shown in Fig. 2. We then train a separate recurrent neural network on each subset of synthetic data and use the predictions from each subset to train the meta-classifier. Moreover, interpretability methods are employed to further assess how robust the model is, revealing that the most significant areas for the deep learning model's decision on the PAD task are consistent with motion cues associated with the artifacts, i.e., screen sloping, hand movement, material reflection, and expression changes.

Overall, the main contributions of this study are five-fold:
- A video distillation technique is proposed to train a 2D CNN on a still image, where "still" encodes both appearance and temporal information from the video sequence into a single RGB image.
- 2D image morphing is introduced to create large-scale synthetic training samples that greatly promote the performance of the face anti-spoofing model.
- Stacked recurrent neural networks are utilized to predict spatiotemporal inconsistencies, and those predictions are then employed to form the deep architecture (meta-model).
- Interpretation techniques are provided for exploring the decisions made by the employed model. The model revealed that motion cues are the most important factors for distinguishing whether an input image is spoofed or not.
- Experiments on four benchmark datasets, consisting of the CASIA-MFSD, Replay-Attack, MSU-MFSD, and OULU-NPU databases, show that our proposed method is significantly superior on three databases in comparison with other state-of-the-art generalization methods.

The rest of this work is organized as follows. Section II discusses recent developments and related past work. Section III explains all the steps of the proposed method. Section IV presents the implementation details, an ablation study, and comparisons on several public benchmark datasets. Section V concludes the work and gives suggestions for future research.
II. Literature Review
Over the past few years, face PAD methods have received considerable attention from both academia and industry. In general, these methods can be roughly classified into appearance-based methods and temporal-based methods.

Appearance-based methods: Traditional appearance-based methods usually extract hand-crafted features such as LBP [23] and SIFT [24] based on various appearance cues. The authors in [5] claimed that color information is crucial and that luminance-chrominance color spaces improve the detection performance of face PAD in comparison to RGB and grayscale image representations.
Fig. 2: 2D synthetic samples from CASIA-MFSD. Left column: video sequence used to generate the synthetic samples. Right column: spatiotemporal encoded images morphed with the still image using alpha values of 0.5 (Synt 1), 1.0 (Synt 2), and 1.5 (Synt 3), respectively. These synthetic samples can be used for ensemble stacking to significantly improve face anti-spoofing performance.

The multiscale filtering approach proposed in [25] was found to be effective, where LBP-based multiscale features provide improved performance. Wen et al. [26] utilize image distortion analysis (IDA) and develop an ensemble classifier in which multiple SVM classifiers are implemented. In particular, the features are selected based on specular reflection, blurriness, chromatic moment, and color diversity to provide input to the SVM classifiers. A component-based coding framework is proposed to encode different components of the face in [27]. To deploy secure face unlocking on a smartphone, a method is developed based on extracting color distortion, Moiré-pattern analysis, surface reflection, and shape deformation [24]. The LBP features are combined with the feature maps of a deep learning model to improve the detection of face PAD in [28]. The authors show that the need for large training samples in face PAD can be mitigated by using convolutional feature maps. Moreover, a hybrid deep learning method is introduced in [29] to encode appearance information from two CNNs, where an SVM classifier is used to discriminate live and spoofed images. Although appearance-based methods provide improved performance in the intra-database testing scenario, the performance remains limited when evaluated on a completely unseen testing domain.

Temporal-based methods: The study reported in [8] estimates global motion and amplifies motion cues such as hand movements or head rotation, where a BiLSTM is used to predict the motion.
Since global estimation leaves artifacts such as black framing at the border of the encoded images in [8], this issue was solved by using dense sampling with a similarity transformation [22]. Moreover, in order to encode head movements, eye blinking, and lip movements, a dynamic mode decomposition (DMD) method is introduced to capture the temporal cues from frame sequences [30]. Eulerian motion magnification is used to magnify facial expressions in [31]; local descriptors such as HOOF and LBP are then utilized to improve the classification performance. The photoplethysmography (rPPG) signal was found to be crucial for improving face PAD performance [32]. A unified framework based on a CNN-BiLSTM is used to capture both appearance and temporal cues in [29]. A study conducted in [33] shows that the spontaneous blinking of a person provides an intrinsic detection cue to improve live face detection. A dense optical flow scheme is proposed to estimate the motion of two successive frames in [34]; the authors claimed that real and attack videos have different optical flow motion patterns, which helps to improve PAD performance. A 3D CNN model is employed to capture both spatial and temporal information in [35]. A combined CNN-RNN model is developed to capture auxiliary information (i.e., the depth map and rPPG signals) for improving detection performance [36]. However, when temporal and appearance-based methods are employed in a cross-dataset scenario, the detection performance remains vulnerable to degradation due to real-world variations (such as user demographics, input cameras, and variations in illumination).
Therefore, domain generalization, which aims to learn from several source domains, becomes significant when dealing with presentation attack detection.

Deep domain generalization methods: Several deep domain generalization methods have been introduced to improve the generalization ability of face PAD. For instance, a domain adaptation method that generates pseudo-labeled samples, named the cyclically disentangled feature translation network (CDFTN), is proposed in [37]. Chuang et al. proposed to improve generalization based on a one-side triplet loss [38]. A two-stream network is utilized to fuse the input RGB image, and meta-pattern learning was proposed to improve generalization [13]. A cross-adversarial training scheme is proposed to improve generalization by minimizing the correlation between two sets of features [16]. The work reported in [14] aims to learn a generalized feature space by mapping the target data to the source-domain style, called Generative Domain Adaptation (GDA). A hypothesis verification framework is proposed in [15], where two hypothesis verification modules are utilized to improve generalization. A novel Shuffled Style Assembly Network (SSAN) is introduced that aligns multiple source domains into a stylized feature space, and domain generalization is improved by a contrastive learning strategy [39]. To select a common feature space, adversarial learning is proposed and aggregation of live faces is performed to achieve a generalized feature space in [12]. However, there is no consensus that pre-defined distributions can be considered optimal for the feature space. Thus, we argue that a model can understand faces much better by simply aligning multiple source domains based on the idea of collaborative ensemble learning.
In particular, the generalized feature space can automatically capture spatiotemporal inconsistencies based on the knowledge provided by multiple source domains.

III. The Proposed Method
Figure 3 illustrates the overall framework. Firstly, we present a method to show how to synthesize training samples.
Fig. 3: Flow chart of our proposed method. A video of length V is divided into non-overlapping segments of smaller length v. For each segment, global motion is estimated and the stabilized sequence is accumulated to obtain a spatiotemporal warped image. Then, the encoded spatiotemporal warped image is morphed with a still image (i.e., the first frame of the segment) by using alpha compositing. Since different alpha values are used to create multiple synthetic images, we build multiple classifiers on these synthetic images to form stacking-based ensemble learning for improving the generalization of face PAD. The purpose of the synthesis is to introduce spatiotemporal artifacts that can be used to train multiple individual models for understanding the relationship between them.
Secondly, a unified CNN-RNN network is proposed because mainstream 2D CNN frameworks cannot deal with sequential data (i.e., sequence-to-sequence inputs). Then, model stacking is designed in such a way that it can minimize the weaknesses and maximize the strengths of every individual model through the meta-learner. Lastly, a model interpretation is provided to investigate the contribution of the synthetic data on which the deep model mainly relies. Each step is explained in the following sub-sections.

A. 2D Virtual Synthesis
To generate synthetic samples, a video V is equally divided into P non-overlapping segments, i.e., V = {S_k}, k = 1, ..., P, where S_k is the k-th segment. The length of each segment is set to w = 40 frames. For each segment, features are extracted from the fixed (first) and moving (second) image of the segment. In particular, the FAST feature detector [40] is utilized to detect interest points, and the FREAK descriptor [41] then extracts features to collect points of interest from both frames. Once salient image features are extracted, the next step is interest-point matching, for which the Hamming distance (HD) is used in our work; a minimal sketch of this detection and matching step is given below.
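The following sketch illustrates the segment splitting and the detection and matching step. It assumes OpenCV with the contrib modules (for cv2.xfeatures2d.FREAK_create) is installed; the helper names and the 40-frame segmentation helper are our own, not the authors' code.

```python
# Hypothetical sketch of segment splitting, FAST detection, FREAK description,
# and Hamming-distance matching between the fixed and moving frames.
import cv2

def split_into_segments(frames, w=40):
    # non-overlapping segments of w frames (w = 40 in the paper)
    return [frames[i:i + w] for i in range(0, len(frames) - w + 1, w)]

def match_fixed_moving(fixed_gray, moving_gray):
    fast = cv2.FastFeatureDetector_create()        # FAST interest points [40]
    freak = cv2.xfeatures2d.FREAK_create()         # binary FREAK descriptors [41]
    kp_f = fast.detect(fixed_gray, None)
    kp_m = fast.detect(moving_gray, None)
    kp_f, des_f = freak.compute(fixed_gray, kp_f)
    kp_m, des_m = freak.compute(moving_gray, kp_m)
    # Hamming distance is the natural metric for binary descriptors
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = matcher.match(des_f, des_m)
    pts_fixed = [kp_f[m.queryIdx].pt for m in matches]
    pts_moving = [kp_m[m.trainIdx].pt for m in matches]
    return pts_fixed, pts_moving
```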
The inter-frame parameters are estimated throughout the whole length of the segment (from its first frame) by using a rigid (Euclidean) transformation. As the name suggests, a rigid transformation preserves distances and angles (i.e., the distance between two points remains the same). The rigid transformation matrix M is a 3x3 matrix. We find the 2D pixel coordinates in the Cartesian coordinate system by estimating the translation map from M. Let [a, b, 1]^T denote the homogeneous coordinates in the moving image and [a', b', 1]^T the coordinates in the fixed image; then

[a', b', 1]^T = M [a, b, 1]^T, with M = [[d11, d12, d13], [d21, d22, d23], [d31, d32, d33]],   (1)

and the pixel shift can be calculated as

[Δa, Δb]^T = [a' - a, b' - b]^T.   (2)

To eliminate false matching points and robustly estimate the geometric transformation between the frames, we use the M-estimator Sample Consensus (MSAC) algorithm [42] to detect outliers and remove false matches. To obtain warped images, we simply average the stabilized frame sequence using the following aggregation function:

e_v = (1/w) Σ_{k=1}^{w} e_{vk},   (3)

where w denotes the total number of selected frames in segment k for video V. Through this aggregation, the average over frames directly merges temporal information, while the image registration contributes the available spatial reference information. A sketch of this alignment and averaging step is given below.
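Building on the match_fixed_moving helper sketched above, the next snippet shows one way to realize the alignment and averaging of Eqs. (1)-(3). OpenCV's estimateAffinePartial2D with RANSAC is used here as a stand-in for the rigid-transform estimation with MSAC outlier rejection described in the paper; treat it as an illustrative approximation rather than the authors' implementation.

```python
# Hypothetical alignment-and-averaging sketch for one segment (Eqs. (1)-(3)).
import cv2
import numpy as np

def distill_segment(segment_gray):
    """segment_gray: list of w grayscale frames; returns one encoded image."""
    fixed = segment_gray[0]
    height, width = fixed.shape
    stabilized = [fixed.astype(np.float32)]
    for moving in segment_gray[1:]:
        pts_fixed, pts_moving = match_fixed_moving(fixed, moving)
        if len(pts_fixed) < 3:
            continue                                 # too few matches to estimate M
        M, _ = cv2.estimateAffinePartial2D(
            np.float32(pts_moving), np.float32(pts_fixed),
            method=cv2.RANSAC)                       # outlier rejection (MSAC in the paper)
        if M is None:
            continue
        warped = cv2.warpAffine(moving, M, (width, height))   # register to the first frame
        stabilized.append(warped.astype(np.float32))
    # Eq. (3): average the registered frames into a single spatiotemporal image
    return np.mean(stabilized, axis=0).astype(np.uint8)
```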
It is worthwhile to mention that the proposed video distillation scheme is inspired by our previous works [8], [22], which estimate global motion. Benefiting from the video distillation nature of these methods, we extend them to generate synthetic samples by introducing a cross-dissolve. Moreover, we use the FREAK descriptor and a rigid transformation to estimate inter-frame motion. By doing this, the computational cost of the method is significantly reduced (we discuss this further in Section IV).

B. Recurrent Neural Network (RNN)

Deep learning methods based on 2D Convolutional Neural Networks (CNNs) have shown improved performance over classical machine learning approaches [9], [6], [7]. However, mainstream 2D CNN frameworks focus on spatial information and thus lack the capacity to understand sequential data. Specifically, CNNs do not have a memory mechanism to capture temporal relations. Motivated by the fact that recurrent neural networks (RNNs) can deal with temporal information, we develop a unified CNN-RNN framework to encode complementary information between frames. In particular, a CNN is fine-tuned on the labeled dataset in the first stage. Then, the fine-tuned features are extracted from the pooling layer and used as input to train a Long Short-Term Memory (LSTM) [44] network.
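The two-stage CNN-RNN pipeline just described can be sketched as follows, assuming a Keras DenseNet-201 backbone whose global-average-pooled features (1920-dimensional) are stacked per segment and fed to a small LSTM classifier; the 500-unit size follows Table I, everything else is illustrative.

```python
import numpy as np
import tensorflow as tf

# Sketch only: a Keras DenseNet-201 stands in for the fine-tuned CNN; its
# pooled features form the per-segment sequence consumed by the RNN.
backbone = tf.keras.applications.DenseNet201(
    include_top=False, weights="imagenet",
    input_shape=(224, 224, 3), pooling="avg")

def segment_to_sequence(images):
    """Map a (T, 224, 224, 3) stack of encoded/synthetic images to a
    (T, 1920) feature sequence (per-model preprocessing omitted)."""
    return backbone.predict(np.asarray(images, dtype=np.float32), verbose=0)

# A small LSTM classifier over the feature sequence (Sub-model 1 in
# Table I uses 500 units); weights are built on the first call with a
# (batch, T, 1920) input.
rnn = tf.keras.Sequential([
    tf.keras.layers.LSTM(500),
    tf.keras.layers.Dense(2, activation="softmax"),
])
```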
The LSTM is the most popular RNN architecture and is capable of learning long-term dependencies. It is composed of a memory cell (C_e), an input gate (i_e), an output gate (o_e), and a forget gate (g_e). The input gate governs the information flow into the cell by gating the cell's non-linear transformation of the inputs, m_e. The output gate decides how much information from the cell is used to compute the output activation of the LSTM unit. The forget gate regulates the extent to which a value remains in the cell. The LSTM unit updates for time step e are:

\begin{bmatrix} g_e \\ i_e \\ m_e \\ o_e \end{bmatrix} =
\begin{bmatrix} \sigma \\ \sigma \\ \tanh \\ \sigma \end{bmatrix}
H \cdot [p_{e-1}, x_e] \quad (5)

C_e = g_e \odot C_{e-1} + m_e \odot i_e \quad (6)

p_e = \tanh(C_e) \odot o_e \quad (7)

where x_e is the input at the current time step, C_e is the current cell state, and g_e, i_e, m_e, and o_e denote the forget gate, input gate, candidate cell input, and output gate activations, respectively. σ denotes the logistic sigmoid function and ⊙ element-wise multiplication. A fully connected layer followed by a softmax layer is used for detecting real and fake images.
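A direct NumPy transcription of Eqs. (5)-(7) for a single time step; the bias term is an assumption added for completeness, since Eq. (5) writes only the weight matrix H.

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x_e, p_prev, C_prev, H, b):
    """One LSTM update following Eqs. (5)-(7).  H acts on the concatenation
    [p_{e-1}, x_e]; the bias b is an added assumption (Eq. (5) omits it)."""
    z = H @ np.concatenate([p_prev, x_e]) + b
    n = p_prev.shape[0]                       # hidden size
    g_e = sigmoid(z[0 * n:1 * n])             # forget gate
    i_e = sigmoid(z[1 * n:2 * n])             # input gate
    m_e = np.tanh(z[2 * n:3 * n])             # candidate cell input
    o_e = sigmoid(z[3 * n:4 * n])             # output gate
    C_e = g_e * C_prev + m_e * i_e            # Eq. (6)
    p_e = np.tanh(C_e) * o_e                  # Eq. (7)
    return p_e, C_e
```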
Fig. 4: (a) The mean of the raw video frames, computed to visualize the global motion, which shows a great deal of distortion in the encoded image. (b) The proposed spatiotemporal encoded images after removing the global motion.

C. Model Stacking

Ensemble learning is supported by multiple approaches such as bagging, boosting, and stacking, which result in better generalization of the learning models [45]. In particular, stacking is an integration technique that combines the predictions of different weak models, wherein a meta-learning model is used to integrate the outputs of the base models [46]. A common approach in stacked ensemble learning is to develop a bench of T Tier-1 classifiers S1, S2, S3, ..., SN based on cross-validation of the training samples [47]. Rather than relying on the prediction of a single model, we train diverse RNN-based sub-models with different synthetic training samples to predict the temporal inconsistencies in the data. In particular, an LSTM [44] and a Bidirectional LSTM (BiLSTM) [48] with different hidden layer sizes are trained on three synthetic sets, where each sub-model works independently and specializes in its own source domain. To better understand the learning of the sub-models, Fig. 5 illustrates the proposed validation scheme, where each RNN is trained with k-1 folds, k-2 folds, and k-3 folds to get the most out of the stacking. Thus, by making experts on different training subsets, we encourage each model to concentrate on different aspects of the data (i.e., temporal inconsistencies): one model can focus on a certain type of features using one subset of the synthetic data, while another model performs better on the others. We then combine the predictions of these expert sub-models by training another model, called a meta-learner (meta-classifier). By doing this, the meta-learner helps to maximize the strengths of every individual model and to reduce generalization errors. Table I shows the architectures and parameters of the base models, while Table II depicts the meta-model architecture. It is worth mentioning that we accumulate the outputs of the three base models on their validation sets as the new validation set for training the meta-model. This way, the meta-model makes the final prediction on the test set.

Fig. 5: The proposed validation for ensemble learning (folds 0-3 of the training set paired with synthetic sets 1-3).

TABLE I: BiLSTM architectures and parameters.
                 First Architecture   Second Architecture   Third Architecture
No. of layers    1                    1                     1
Layers type      LSTM                 BiLSTM                LSTM
No. of units     500                  20                    100
Optimizer        ADAM                 ADAM                  ADAM
Learning rate    0.0001               0.0001                0.001
Cost function    cross entropy        cross entropy         cross entropy

TABLE II: Meta-model architecture and parameters.
No. of layers    1
Layers type      LSTM
No. of units     20
Optimizer        ADAM
Learning rate    0.0001
Cost function    cross entropy
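The stacking described above can be summarized by the following sketch, which assumes each fitted sub-model exposes a predict_proba-style call; the base models' validation outputs become the meta-learner's training features, and their test outputs are passed through the same meta-learner at inference time.

```python
import numpy as np

def stacked_features(sub_models, val_sets, test_sets):
    """Collect the experts' outputs for the meta-learner: one column block
    of class probabilities per expert (sketch; predict_proba-style API
    assumed)."""
    meta_train = np.hstack([m.predict_proba(X) for m, X in zip(sub_models, val_sets)])
    meta_test = np.hstack([m.predict_proba(X) for m, X in zip(sub_models, test_sets)])
    return meta_train, meta_test

# meta_train, meta_test = stacked_features([rnn1, rnn2, rnn3],
#                                          [val1, val2, val3],
#                                          [test, test, test])
# meta_learner.fit(meta_train, y_val)      # e.g. the small LSTM of Table II
# final_scores = meta_learner.predict(meta_test)
```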
D. Interpretation of a deep neural network

Interpretation is essential to observe which learned patterns in the data are important, but there is no clear consensus on how interpretability should best be defined in the context of machine learning. Although explanation methods intend to make neural networks more trustworthy and interpretable, the question arises of how certain features help deep learning make such a valuable prediction.
For instance, the synthetic samples in our work are found to be more useful for training a deep model, and the model shows better interpretability compared with the same model trained without synthetic samples. This is because the motion cues that are naturally available in the frame sequences are "easy to learn" for the model and play an important role in model optimization. Thus, interpretation is becoming increasingly popular and leads to useful and promising findings. In our work, Gradient-weighted Class Activation Mapping (Grad-CAM) [49], occlusion sensitivity maps (OCC-SEN) [50], gradient attribution maps using guided backpropagation (Grad-ATT) [51], and locally interpretable model-agnostic explanations (LIME) [52] are utilized to understand which patterns in the data are deemed important or contribute to the final decision. In particular, this enables us to trust the behavior of the developed deep learning model and/or to further tune the model by observing its interpretations. We extract the visualization maps from the pretrained DenseNet-201 [57] convolutional neural network for all of the above methods in our experiments.
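For reference, a Grad-CAM sketch in Keras under the assumption that the fine-tuned PAD classifier keeps the DenseNet-201 layer naming of keras.applications (where the last convolutional activation is the layer named "relu"); the snippet stands in for, and is not, the visualization code used in the paper.

```python
import numpy as np
import tensorflow as tf

def grad_cam(model, image, class_idx, conv_layer_name="relu"):
    """Grad-CAM sketch for a Keras DenseNet-201 classifier.  The layer
    name 'relu' (last conv activation in keras.applications DenseNet-201)
    is an assumption worth checking on the actual fine-tuned model."""
    conv_layer = model.get_layer(conv_layer_name)
    grad_model = tf.keras.Model(model.inputs, [conv_layer.output, model.output])
    with tf.GradientTape() as tape:
        conv_out, preds = grad_model(image[np.newaxis, ...])
        score = preds[:, class_idx]
    grads = tape.gradient(score, conv_out)
    weights = tf.reduce_mean(grads, axis=(1, 2))               # GAP over H, W
    cam = tf.nn.relu(tf.einsum("bhwc,bc->bhw", conv_out, weights))[0]
    return (cam / (tf.reduce_max(cam) + 1e-8)).numpy()         # normalized heatmap
```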
In Fig. 6, we visualize diverse sets of synthetic images from the CASIA dataset. The first four rows show print-attack images, while the next four rows show replay-attack images. Each visualization method captures the class-discriminative regions, thanks to the proposed video distillation and synthetic data generation scheme that forces the network to use more subtle cues for correct classification. In particular, the first row shows that the neurons in the deep convolutional layers focus on the paper's texture and the hand-movement cues. However, the Grad-ATT [51] interpretation shows that the model also takes the background as context for its prediction. Surprisingly, this issue is eliminated by the proposed synthetic data generation scheme: the second, third, and fourth rows show that the model only considers motion cues and surface edges and barely touches the background context. In the case of a replay attack, the remaining rows show that the tablet screen and the hand movement provide discriminative information for the model's prediction. Although we cannot present this for every image in the dataset, we observed that mouth information, eye blinking, and head rotation contribute positively to distinguishing live from spoofed images. Thus, the interpretations obtained with the above methods demonstrate that the proposed learning model focuses on the correct features of the input data, and the model's decisions can be viewed in a human-understandable way. Moreover, the proposed synthetic data generation method provides informative RGB images and helps the model make the features of spoofed faces more dispersed, which allows a better class boundary that generalizes well to the target domain.

IV. Experimental analysis using open datasets

To assess the effectiveness of the synthesized face images, four publicly available databases are used: the OULU-NPU database [58] (denoted as O), the CASIA Face Anti-Spoofing database [59] (denoted as C), the Idiap Replay-Attack database [60] (denoted as I), and the MSU Mobile Face Spoofing database [26] (denoted as M). The performance is evaluated in terms of the Half Total Error Rate (HTER), i.e., half of the sum of the false acceptance rate and the false rejection rate, and the Area Under the Curve (AUC) on the target testing dataset.
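The two metrics can be computed as in the sketch below; the threshold convention (a score at or above the threshold is accepted as live) is an assumption, and in the cross-dataset protocol the threshold is fixed at the equal-error-rate point of the source-domain validation set.

```python
import numpy as np
from sklearn.metrics import roc_auc_score

def hter(scores, labels, threshold):
    """Half Total Error Rate: mean of the false acceptance rate (attacks
    accepted as live) and the false rejection rate (live samples rejected).
    Assumed convention: labels 1 = live, 0 = attack."""
    accepted = scores >= threshold
    far = np.mean(accepted[labels == 0])        # attacks wrongly accepted
    frr = np.mean(~accepted[labels == 1])       # live samples wrongly rejected
    return 0.5 * (far + frr)

# auc = roc_auc_score(labels, scores)           # AUC on the target test set
```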
Fig. 6: Visualization of feature maps. The types of images are labelled in the first column. The second column shows the original encoded and synthetic images. The third column illustrates the feature maps from Grad-CAM [49], while the fourth column shows the occlusion sensitivity maps [50]. Similarly, the fifth and sixth columns visualize the feature maps from the gradient attribution map using guided backpropagation [51] and from locally interpretable model-agnostic explanations [52], respectively. The last column shows the masked images obtained from the LIME predictions.

A. Implementation details

All the images are resized to 224 × 224 according to the input requirement of the pretrained DenseNet-201 [57] architecture. The CNN model is fine-tuned using the Stochastic Gradient Descent (SGD) optimizer with a validation frequency of 30 and a mini-batch size of 32. We set the learning rate to 0.0001 and do not use a fixed number of epochs, because an early stopping function [61] is utilized to stop training automatically and prevent overfitting.
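A sketch of this fine-tuning configuration in Keras; the early-stopping patience is an assumption (the paper only states that early stopping [61] is used), and the stated validation frequency of 30 has no direct Keras equivalent here, so validation simply runs once per epoch.

```python
import tensorflow as tf

def finetune_config(model):
    """Fine-tuning configuration sketch: SGD with learning rate 0.0001 and
    mini-batches of 32; no fixed epoch budget because early stopping halts
    training (patience value assumed)."""
    model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-4),
                  loss="categorical_crossentropy", metrics=["accuracy"])
    return tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=5,
                                            restore_best_weights=True)

# stopper = finetune_config(cnn)
# cnn.fit(train_ds, validation_data=val_ds, epochs=200,
#         batch_size=32, callbacks=[stopper])
```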
During the ensemble learning stage, the CNN model is fine-tuned separately with the original encoded video clips and with the three different synthetic sets. Then, the features from each fine-tuned model are used as input to train three diverse RNN models. In particular, the Adam optimizer is utilized with a validation frequency of 30. The learning rate is set to 0.0001, and the weights are initialized with the He initializer [62] for the first LSTM model (sub-model 1). We do not set a fixed number of epochs, because an early stopping function [61] is used to prevent overfitting. For the second sub-model, a BiLSTM is trained with a hidden layer dimension of 20; the other parameters are kept the same as for sub-model 1. For the third sub-model, an LSTM is trained with a hidden layer dimension of 100 and the learning rate decreased to 0.001. For the synthetic data method, we generate three synthetic sets, using alpha values of 0.5, 1.0, and 1.5 to expand the training images. To train the meta-model, 80 epochs, a gradient threshold of 1, and a hidden layer dimension of 20 are used to train the meta-learner. For reproducibility of our results, we keep the same parameter settings for the experiments on all databases.

TABLE III: Performance evaluation using the MSU-MFSD (M), CASIA-MFSD (C), Replay-Attack (I), and OULU-NPU (O) databases. Comparison results are obtained directly from the corresponding papers.

                          O&C&I to M          O&M&I to C          O&C&M to I          I&C&M to O
Method                    HTER(%)  AUC(%)     HTER(%)  AUC(%)     HTER(%)  AUC(%)     HTER(%)  AUC(%)
MADDG [11]                17.69    88.06      24.50    84.51      22.19    84.99      27.89    80.02
DAFL [10]                 14.58    92.58      17.41    90.12      15.13    95.76      14.72    93.08
SSDG-R [17]               07.38    97.17      10.44    95.94      11.71    96.59      15.61    91.54
DR-MD [9]                 17.02    90.10      19.68    87.43      20.87    86.72      25.02    81.47
MA-Net [6]                20.80    -          25.60    -          24.70    -          26.30    -
RFMetaFAS [7]             13.89    93.98      20.27    88.16      17.30    90.48      16.45    91.16
FAS-DR-BC(MT) [53]        11.67    93.09      18.44    89.67      11.93    94.95      16.23    91.18
ADL [12]                  05.00    97.58      10.00    96.85      12.07    94.68      13.45    94.43
ResNet-BiLSTM w/DS [3]    04.12    99.93      07.04    99.87      13.48    97.42      41.33    88.48
HFN + MP [13]             05.24    97.28      09.11    96.09      15.35    90.67      12.40    94.26
Cross-ADD [16]            11.64    95.27      17.51    89.98      15.08    91.92      14.27    93.04
ASGS [22]                 05.91    99.88      10.21    99.86      45.84    76.09      13.54    99.73
GDA [14]                  09.20    98.00      12.20    93.00      10.00    96.00      14.40    92.60
SSAN-R [39]               06.67    98.75      10.00    96.67      08.88    96.79      13.72    93.63
FG +HV [15]               09.17    96.92      12.47    93.47      16.29    90.11      13.58    93.55
Ensemble (CNN-RNN)        04.02    99.95      06.97    99.97      33.49    93.16      10.91    99.89

TABLE IV: The results of cross-dataset testing on limited source domains. The comparison results are obtained directly from the corresponding papers.

                      O&I to M        M&I to C        O&I to C        O&M to I        C&M to O
Method                HTER(%) AUC(%)  HTER(%) AUC(%)  HTER(%) AUC(%)  HTER(%) AUC(%)  HTER(%) AUC(%)
Supervised [54]       12.1    94.2    30.4    77.0    18.0    90.1    16.8    93.8    17.9    89.5
Mean-Teacher [55]     19.6    86.5    31.1    76.6    23.7    84.9    18.4    86.0    23.5    84.9
USDAN [56]            15.8    88.1    35.6    69.0    33.3    72.7    19.8    87.9    20.2    88.3
EPCR-labeled [54]     12.5    95.3    18.9    89.7    18.9    89.7    14.0    92.4    17.9    90.9
EPCR-unlabeled [54]   10.4    94.5    25.4    83.8    16.7    91.4    12.4    94.3    17.8    91.3
Ensemble (CNN-RNN)    07.8    98.5    17.1    94.3    12.5    97.1    15.1    93.1    14.7    93.4

B. Comparison against the state-of-the-art methods

To compare the performance with the recently introduced domain generalization methods, we conduct cross-dataset testing where the model is trained on three source databases and evaluated on a completely unseen database using the leave-one-out (LOO) strategy. In particular, the testing sets of the source databases are used as a validation set for computing the equal error rate. Thus, the HTER is calculated directly on the target (unseen) dataset for a fair comparison with the previous methods.
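The leave-one-out protocol amounts to enumerating, for each target database, the remaining three as sources; a trivial sketch (dataset symbols only, no loading code):

```python
DATASETS = ["O", "C", "I", "M"]     # OULU-NPU, CASIA, Replay-Attack, MSU

def leave_one_out_protocols(datasets=DATASETS):
    """Enumerate the cross-dataset protocols: train on three source
    databases, test on the completely unseen one (e.g. 'O&C&I to M')."""
    for target in datasets:
        sources = [d for d in datasets if d != target]
        yield sources, target

for sources, target in leave_one_out_protocols():
    print("&".join(sources), "to", target)
```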
As shown in Table III, the proposed ensemble learning provides the best results on the three protocols O&C&I to M, O&M&I to C, and I&C&M to O, demonstrating that the model can extract more generalized differentiation cues for face PAD. One reason is that recently proposed countermeasures pay most of their attention to exploring a common feature space across multiple source domains, which tends to fit only the data in the source domains [17]. In contrast to existing approaches based on adversarial learning [12], generative domain adaptation [14], and meta-learning [13], the proposed ensemble learning improves generalization by exploiting the relationship between multiple trained models that are each experts in their own source domain, while ensuring that the meta-learner can take complementary information from them to improve the generalization of the face PAD model.

C. Experiment on Limited Source Domains

We also consider the scenario of limited source domains by training the model on two source domains instead of three, as shown in Table IV. The model continues to achieve the best performance on all the target domains. In particular, the lowest HTER in four protocols and the highest AUC show that limited source data does not degrade the generalization capability of our network in this challenging testing scenario.

D. Ablation study

To verify the superiority of the proposed ensemble learning and the contribution of each sub-model, we conduct experiments for multi-source domains and limited-source domains separately. Table V reports the numerical results for the multi-source domain settings. The baseline results represent the performance of the ResNet-BiLSTM model without synthetic data; these results are based on the encoded spatiotemporal images obtained from the proposed video distillation scheme. Sub-model 1 represents the results when one set of synthetic images is added to the spatiotemporal encoded images using an alpha value of 0.5.
The numerical results of the CNN and CNN-RNN show that the synthetic images start improving the model's performance on all datasets. In particular, the RNN improves the performance significantly. Similarly, sub-model 2 represents the results with a different set of synthetic images (i.e., the alpha value was increased to 1.0). The proposed model experienced a slight drop in performance for the CNN predictions but continues to improve the performance of the RNN on M, I and O. Moreover, when we further evaluate the performance on the third set of synthetic images (α = 1.5), sub-model 3 shows that further improvement can be achieved with synthetic images. When we combine the predictions of these sub-models and train the meta-learner, we achieve remarkable performance on three datasets in comparison to the state-of-the-art methods [53], [6], [7], [8], [9], [10], [11]. The quantitative results indicate that ensemble learning guided by the video distillation scheme is beneficial for improving the performance of cross-domain face PAD.
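As a minimal sketch of this stacking step (a generic illustration rather than the exact training code; the choice of logistic regression as the meta-learner is an assumption), the per-sample scores of the sub-models can be stacked into a feature matrix on held-out source-domain data and a lightweight meta-learner fit on top of them:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

def fit_meta_learner(sub_model_scores, labels):
    # sub_model_scores: one score array per sub-model, computed on held-out
    # source-domain samples so the meta-learner sees unbiased predictions.
    X = np.column_stack(sub_model_scores)   # shape: (n_samples, n_sub_models)
    meta = LogisticRegression()
    meta.fit(X, labels)                     # 1 = live, 0 = attack
    return meta

def ensemble_score(meta, sub_model_scores):
    # Fuse the complementary sub-model predictions into a single liveness score.
    return meta.predict_proba(np.column_stack(sub_model_scores))[:, 1]
```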
TABLE V: Ablation study using cross-database evaluation (HTER % / AUC %).

Method                        O&C&I to M      O&M&I to C      O&C&M to I      I&C&M to O
Baseline w/o synthetic data   19.02 / 86.12   19.52 / 87.63   31.66 / 76.22   35.44 / 85.54
Sub-model 1 (CNN)             18.11 / 87.63   18.66 / 86.22   29.00 / 78.39   36.55 / 84.25
Sub-model 1 (CNN-RNN)         09.97 / 99.26   07.31 / 99.98   36.87 / 76.05   19.90 / 99.55
Sub-model 2 (CNN)             18.82 / 91.09   22.20 / 84.49   35.63 / 77.24   34.01 / 74.91
Sub-model 2 (CNN-RNN)         08.40 / 98.64   10.14 / 97.04   34.44 / 77.01   12.41 / 98.55
Sub-model 3 (CNN)             17.21 / 90.87   23.22 / 84.49   37.33 / 75.05   33.45 / 76.14
Sub-model 3 (CNN-RNN)         06.05 / 98.53   06.08 / 99.11   39.33 / 76.41   14.40 / 98.95
Ensemble (CNN)                08.95 / 97.79   15.80 / 95.47   35.66 / 90.19   12.89 / 98.94
Ensemble (CNN-RNN)            04.02 / 99.95   06.97 / 99.97   33.49 / 93.16   10.91 / 99.89

TABLE VI: Ablation study with limited open-source databases using cross-database evaluation (HTER % / AUC %).

Method        O&I to M      M&I to C      O&I to C      O&M to I      C&M to O
Sub-model 1   19.6 / 86.5   31.1 / 76.6   23.7 / 84.9   22.4 / 84.0   23.5 / 84.9
Sub-model 2   15.8 / 88.1   35.6 / 69.0   33.3 / 72.7   19.8 / 87.9   20.2 / 88.3
Sub-model 3   12.5 / 95.3   18.9 / 89.7   18.9 / 89.7   18.0 / 92.4   17.9 / 90.9
Ensemble      07.8 / 98.5   17.1 / 94.3   12.5 / 97.1   15.1 / 93.1   14.7 / 93.4

Fig. 7: The t-SNE visualization of feature distributions on cross-testing scenarios. (a) shows the feature distribution of the original encoded video clips, (b) reflects the feature distribution of encoded video clips with a subset of synthetic samples, and (c) shows the feature distribution of the meta-learner.

TABLE VII: Average execution time (in seconds).

Dataset         Optical flow [63]   ASGS method [22]   TSS method [8]   Ours
CASIA-FASD      1560                1487               1140             1023
REPLAY-ATTACK   1082                1003               780              641
Analysis of limited source domains: In Table VI, we compare the domain generalization ability of our proposed method when a limited number of source databases is accessible (i.e., only two source datasets). The results indicate that the proposed method is effective even in these challenging cases. We hypothesize that this improvement is due to the fact that the encoded RGB images with synthetic samples are almost as descriptive as the entire video.

Comparisons of execution times: We analyze the execution times of the proposed video distillation technique against the previous global motion estimation methods [8], [22] and optical flow [63]. Table VII reports the total number of seconds used to generate the training samples on two datasets. All these comparison results were obtained in a MATLAB environment on a workstation with a 3.5 GHz Intel Core i7-5930K and 64 GB of RAM. One can see that the proposed global motion estimation technique is computationally less expensive than the previous motion estimation methods reported recently in the literature.

E. Visualization and Analysis

To intuitively show the contribution of each sub-model, we visualize the distributions of different features using t-SNE [64], as illustrated in Fig. 7. The model trained on the O&C&I source domains without synthetic samples shows a trivial distribution in Fig. 7 (a), with an unclear interface between live and spoofed samples. One can see that these overlapped areas can easily be misclassified and degrade the performance.
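Such feature-space plots can be produced with standard tooling; the sketch below is a generic illustration (the feature matrix, labels, and t-SNE settings are placeholders, not the configuration used for Fig. 7).

```python
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

def plot_tsne(features, labels, title):
    # Project high-dimensional features (n_samples, n_dims) to 2-D and colour
    # the points by class to inspect the live/attack interface.
    emb = TSNE(n_components=2, perplexity=30, init="pca",
               random_state=0).fit_transform(np.asarray(features))
    labels = np.asarray(labels)
    for cls, name in [(0, "Attack"), (1, "Real")]:
        pts = emb[labels == cls]
        plt.scatter(pts[:, 0], pts[:, 1], s=4, label=name)
    plt.legend()
    plt.title(title)
    plt.show()
```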
After adding synthetic samples to the sub-model, as represented in Fig. 7 (b), the feature distribution improves and provides a relatively clearer interface than the baseline model, because the synthetic samples force the model to predict the spatiotemporal artifacts. Moreover, when the meta-model is introduced, a well-structured and compact distribution with a clear interface can be seen in Fig. 7 (c). Thus, our proposed ensemble learning shows good generalizability on unseen target data.

Fig. 8: The Receiver Operating Characteristic (ROC) curves for (a) O&C&I to M, (b) O&M&I to C, (c) O&C&M to I, and (d) I&C&M to O, obtained by plotting the true positive rate (TPR) against the false positive rate (FPR).

In Fig. 8, we visualize the ROC curves to show how well the model is capable of distinguishing the real and attack classes. As illustrated in Fig. 8, the meta-model achieves more than 90% AUC on all datasets, which is a very impressive performance on unseen testing sets. The ROC curve is plotted with the TPR against the FPR, where the FPR is on the x-axis and the TPR is on the y-axis. In particular, the meta-model (ensemble) drags the curves closer to the top-left corner, indicating better performance.
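These per-protocol curves can be generated with standard tooling; the following is a generic sketch (not the plotting code used for Fig. 8), assuming per-sample liveness scores and binary labels for one target protocol.

```python
from sklearn.metrics import roc_curve, roc_auc_score
import matplotlib.pyplot as plt

def plot_roc(labels, scores, name):
    # TPR against FPR on the unseen target set; the AUC summarises the curve.
    fpr, tpr, _ = roc_curve(labels, scores)   # 1 = live, 0 = attack
    auc = roc_auc_score(labels, scores)
    plt.plot(fpr, tpr, label=f"{name} (AUC = {auc:.2f})")
    plt.xlabel("False positive rate")
    plt.ylabel("True positive rate")
    plt.legend()
```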
V. Conclusions

In this paper, we show that ensemble learning represents an interesting research direction for improving the generalization of face PAD. In particular, the model is composed of multiple synthetic source domains, and each sub-model predicts the spatiotemporal inconsistencies based on its similarity to its training domain. Besides, a meta-learner is introduced to take the complementary information from each sub-model. Based on the experimental results on four benchmark datasets, the proposed method exhibits better performance than a single model trained only on the original training data. Thus, ensemble stacking is shown to outperform the existing state-of-the-art generalization methods. Finally, the interpretation of the model shows that capturing the motion information is quite helpful for improving the generalization ability of the proposed method. Our future work will focus on the development of robust motion estimation methods in end-to-end learning to improve the generalization of face PAD.

VI. Declaration of Competing Interest

The authors have no conflict of interest that could have appeared to influence the work reported in this paper.

VII. Acknowledgments

This work is supported by the Center for Machine Vision and Signal Analysis (CMVS) in the Faculty of Information Technology and Electrical Engineering (ITEE) at the University of Oulu, Finland.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Nan, “A structural topic model for exploring user satisfaction with mobile payments,” Computers, Materials and Continua, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 73, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 2, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 3815–3826, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [2] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Grother, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Ngan, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Hanaoka, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=', “Ongoing face recognition vendor test (frvt) part 2: Identification,” 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [3] U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Muhammad and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Oussalah, “Face anti-spoofing from the perspec- tive of data sampling,” Electronics Letters, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [4] U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Muhammad and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Oussalah, “Self-supervised face presentation attack detection with dynamic grayscale snippets,” arXiv preprint arXiv:2208.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='13070, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [5] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Boulkenafet, J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Komulainen, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Hadid, “Face spoofing detec- tion using colour texture analysis,” IEEE Transactions on Information Forensics and Security, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 11, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 8, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 1818–1830, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [6] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Liu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Tan, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Wan, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Liang, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Lei, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Guo, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Li, “Face anti- spoofing via adversarial cross-modality translation,” IEEE Transactions on Information Forensics and Security, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 16, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 2759–2772, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [7] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Shao, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Lan, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Yuen, “Regularized fine-grained meta face anti-spoofing,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 34, pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 11974–11981, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [8] U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Muhammad, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Yu, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Komulainen, “Self-supervised 2d face presentation attack detection via temporal sequence sampling,” Pattern Recognition Letters, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [9] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Wang, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Han, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Shan, and X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Chen, “Cross-domain face pre- sentation attack detection via multi-domain disentangled representation learning,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 6678–6687, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [10] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Saha, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Xu, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Kanakis, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Georgoulis, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Chen, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Paudel, and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Van Gool, “Domain agnostic feature learning for image and video based face anti-spoofing,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 802–803, 2020.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [11] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Shao, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Lan, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Li, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Yuen, “Multi-adversarial discriminative deep domain generalization for face presentation attack detection,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 10023–10031, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [12] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Liu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Mu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Yu, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Ruan, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Shu, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Yang, “Adversarial learning and decomposition-based domain generalization for face anti- spoofing,” Pattern Recognition Letters, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 155, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 171–177, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [13] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Cai, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Li, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Wan, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Li, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Hu, and A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Kot, “Learning meta pattern for face anti-spoofing,” IEEE Transactions on Information Forensics and Security, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 17, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 1201–1213, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [14] Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Zhou, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='-Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Zhang, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Yao, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Yi, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Sheng, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Ding, and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Ma, “Generative domain adaptation for face anti-spoofing,” in European Conference on Computer Vision, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 335–356, Springer, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [15] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Liu, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Lu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Xu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Yang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Ding, and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Ma, “Feature generation and hypothesis verification for reliable face anti-spoofing,” in Proceed- ings of the AAAI Conference on Artificial Intelligence, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 36, pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 1782– 1791, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [16] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Huang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Xiang, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Yang, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Lv, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Li, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Weng, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Fu, “Generalized face anti-spoofing via cross-adversarial disentanglement with mixing augmentation,” in ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 2939–2943, IEEE, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' [17] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Jia, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Zhang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Shan, and X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' Chen, “Single-side domain generaliza- tion for face anti-spoofing,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 8484–8493, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content=' 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='8 rate positive 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='6 True 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='4 Sub-model 1 Sub-model 2 Sub-model 3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='2 Ensemblelearning 0 0 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='8 1 Falsepositive rate0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='8 rate positive 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='6 Sub-model 1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='4 Sub-model 2 Sub-model 3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='2 Ensemblelearning 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='8 Falsepositiverate0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='8 rate positive 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='4 Sub-model 1 Sub-model 2 Sub-model 3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='2 Ensemblelearning 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='8 Falsepositiverate0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='8 rate positive 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='6 True 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tA0T4oBgHgl3EQfNP_j/content/2301.02145v1.pdf'} +page_content='4 Sub-model 1 Sub-model 2 Sub-model 3 0.' 
diff --git a/3tFAT4oBgHgl3EQfERy2/content/2301.08421v1.pdf b/3tFAT4oBgHgl3EQfERy2/content/2301.08421v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..21d98d94dac7ccaa118dcccadc146086dffbd2fb --- /dev/null +++ b/3tFAT4oBgHgl3EQfERy2/content/2301.08421v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9cbd38f9161ac71ab44828858540d255b6f02e9be3801353243e99b327d6298 +size 1609960 diff --git a/4dAzT4oBgHgl3EQf9f77/content/tmp_files/2301.01922v1.pdf.txt b/4dAzT4oBgHgl3EQf9f77/content/tmp_files/2301.01922v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..31c0f3c6ffebb55d8568f93e7bf0243426599347 --- /dev/null +++ b/4dAzT4oBgHgl3EQf9f77/content/tmp_files/2301.01922v1.pdf.txt @@ -0,0 +1,1151 @@

Open-Set Face Identification on Few-Shot Gallery by Fine-Tuning
Hojin Park, Jaewoo Park, and Andrew Beng Jin Teoh
School of Electrical and Electronics Engineering, College of Engineering, Yonsei University, Seoul, Korea
2014142100@yonsei.ac.kr, julypraise@gmail.com, bjteoh@yonsei.ac.kr

Abstract—In this paper, we focus on addressing the open-set face identification problem on a few-shot gallery by fine-tuning. The problem assumes a realistic scenario for face identification, where only a small number of face images is given for enrollment and any unknown identity must be rejected during identification. We observe that both face recognition models pretrained on a large dataset and naively fine-tuned models perform poorly for this task. Motivated by this issue, we propose an effective fine-tuning scheme with classifier weight imprinting and exclusive BatchNorm layer tuning. To further improve rejection accuracy on unknown identities, we propose a novel matcher called Neighborhood Aware Cosine (NAC) that computes similarity based on neighborhood information. We validate the effectiveness of the proposed schemes thoroughly on large-scale face benchmarks across different convolutional neural network architectures. The source code for this project is available at: https://github.com/1ho0jin1/OSFI-by-FineTuning

I. INTRODUCTION

Recently, face recognition (FR) has achieved astonishing success, largely attributed to three factors.
Deep convolutional neural network (CNN) architectures [2], [3] with strong visual priors were developed and leveraged as FR embedding models. Large-scale face datasets [4], [5] covering massive numbers of identities with diverse ethnicities and facial variations became available. On top of these, various metric learning losses [6]–[9] elevated the performance of deep FR models to an unprecedented level.

The majority of FR embedding models have been evaluated on numerous benchmarks with closed-set identification [7]–[11]. The closed-set identification protocol assumes that all probe identities are present in the gallery. However, in a realistic scenario, an unknown identity that is not enrolled may be encountered. Another important and practical aspect to consider is the scarcity of intra-class samples for the gallery identities to be registered; namely, due to the expensive data acquisition cost and privacy issues, only a very small number of samples might be available for each gallery identity. In this respect, open-set face identification (OSFI) with a small-sized gallery is closer to a real scenario, as it requires both identification of known probe identities and rejection of unknown probe identities based on the limited information in the small gallery set. Despite its practical significance, however, OSFI with a small gallery has rarely been explored.

Fig. 1. (a) Full fine-tuning of all parameters severely degrades OSFI performance, while our method significantly improves the pretrained model. Detection & Identification Rate (DIR) [1] quantifies both correct identification of the known probe identities and detection of the unknown. (b) An outline of our proposed fine-tuning scheme: given a model pretrained on a large-scale face database, we initialize the gallery-set classifier by weight imprinting, and then fine-tune the model on the few-shot gallery set by training only the BatchNorm layers. In the evaluation stage, a given probe is either accepted as a known or rejected as an unknown identity based on a novel similarity matcher dubbed the Neighborhood Aware Cosine (NAC) matcher.

Devising a model specific to OSFI with a small gallery is challenging in the following aspects. Firstly, an OSFI model must perform not only identification of known probe identities but also correct rejection of unknown probe identities. Hence, conventional FR embedding models devised mainly for closed-set identification can perform poorly at rejecting the unknown. In fact, as observed in Fig. 1 (a), FR embedding models pretrained on a large-scale public face database are not effective for open-set identification, leaving room for improvement. This suggests the need for fitting the pretrained model to be more specific to the given gallery set.

Secondly, due to the few-shot nature of the small-sized gallery set, there is a high risk of overfitting when fine-tuning the pretrained model. As shown in Fig. 1 (a), full fine-tuning (i.e., updating all parameters) of the pretrained model results in severe performance degradation.
This drives us to devise an overfitting-resilient parameter tuning scheme. Moreover, an ordinary cosine similarity matcher, as used in closed-set identification, can entail a large tradeoff between known probe identification and unknown probe rejection. As will be observed in Sec. III-D, the simple cosine matcher has a severe drawback for the task at hand. This motivates us to devise a robust matcher for OSFI.

Based on these observations, we propose an efficient fine-tuning scheme and a novel similarity-based matcher for OSFI constrained to a small gallery set. Our fine-tuning scheme consists of weight initialization of the classifier by weight imprinting (WI) [12] and training only the BatchNorm (BN) layers [13] for overfitting-resilient adaptation on the small gallery set. Moreover, for both effective detection of the unknown and identification of the known probe identities, we propose a novel Neighborhood Aware Cosine (NAC) matcher that respects the neighborhood information of the learned gallery features and hence better calibrates the rejection score. Our contributions are summarized as follows:

1) To effectively solve the OSFI problem constrained to a small gallery set, we propose to fine-tune the pretrained face embedding model. Since full fine-tuning deteriorates the embedding quality, we search for the optimal fine-tuning method.
2) We demonstrate that the combination of weight imprinting and exclusive BatchNorm layer fine-tuning outperforms other baselines.
3) We recognize that the commonly used cosine similarity is a sub-optimal matcher for rejection, and propose a novel matcher named NAC that significantly improves the rejection accuracy.

II. RELATED WORKS

A. Open Set Face Identification (OSFI)

[14], one of the earliest works on OSFI, used the proposed Open-set TCM-kNN on top of features extracted by PCA and Fisher Linear Discriminant. [15] proposed their own OSFI protocol and showed that an extreme value machine [16] trained on the gallery set performs better than cosine similarity or linear discriminant analysis as a matcher. [17] trained a matcher composed of locality-sensitive hashing [18] and partial least squares [19]. [20] applied OpenMax [21] and PROSER [22], two methods for open-set recognition of generic images, on top of extracted face features.

All previous works propose to train an open-set classifier (matcher) of some form, but all of them use a fixed encoder. To the best of our knowledge, we are the first to propose an effective fine-tuning scheme as a solution to OSFI.

B. Cosine Similarity-based Loss Functions

[23] proposed to l2-normalize the features such that the training loss is determined only by the angle between the feature and the classifier weights. [7] further extended this idea by applying a multiplicative margin to the angle between a feature and its corresponding weight vector. This penalty pulls intra-class features together while pushing inter-class centers (prototypes) apart. A number of follow-up papers such as [8]–[11] modify this angular margin term in different ways, but their motivations and properties are generally similar. Therefore, in our experiments we use only CosFace loss [8] as a representative method; for a comprehensive understanding of these loss functions, refer to [24].
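For concreteness, the additive cosine-margin idea behind CosFace-style losses can be sketched as below. This is a minimal PyTorch illustration, not the authors' implementation; the class name, the scale s, the margin m, and the initialization are illustrative assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CosFaceStyleLoss(nn.Module):
    """Additive cosine-margin softmax: the target-class logit becomes s * (cos(theta) - m)."""
    def __init__(self, feat_dim, num_classes, s=30.0, m=0.35):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(num_classes, feat_dim))
        nn.init.xavier_uniform_(self.weight)
        self.s, self.m = s, m

    def forward(self, features, labels):
        # Cosine similarity between l2-normalized features and l2-normalized class weights.
        cosine = F.linear(F.normalize(features), F.normalize(self.weight))  # [B, C]
        # Subtract the margin only from the target-class cosine.
        margin = torch.zeros_like(cosine)
        margin.scatter_(1, labels.view(-1, 1), self.m)
        return F.cross_entropy(self.s * (cosine - margin), labels)
```

In the fine-tuning setting discussed later, `features` would be the encoder output φ(x) for a gallery batch and `labels` the corresponding gallery identities.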
APPROACH
Our proposed approach is two-fold: fine-tuning on the gallery and open-set identification evaluation. In the fine-tuning stage, the classifier is initialized by weight imprinting to initiate learning from optimally discriminative features, and the model is fine-tuned by updating only the BatchNorm layers to avoid overfitting on the few-shot gallery data. In evaluation, we utilize a novel matcher, NAC, that computes a neighborhood-aware similarity for better-calibrated rejection of the unknown. We demonstrate that the combination of these three methods significantly outperforms all other baselines.
A. Problem Definition and Metrics
Formally, in an OSFI problem, we assume the availability of an encoder φ pretrained on a large-scale face database (an FR embedding model) that is disjoint from the evaluation set with respect to identity. The evaluation set consists of a gallery G = \{(x_i^G, y_i^G)\}_{i=1}^{Cm} and a probe set Q. The probe set Q is further divided into the known probe set K = \{(x_i^K, y_i^K)\} and the unknown probe set U = \{(x_i^U, y_i^U)\}. G and K have no overlapping images x but share the same identities y ∈ {1, ..., C}, whereas U has disjoint identities, i.e., Y_U ∩ {1, ..., C} = Ø. m refers to the number of images per identity in G, which we fix to 3 to satisfy the few-shot constraint. We allow the encoder to be fine-tuned over the gallery set.
The evaluation of OSFI performance uses the detection and identification rate at some false alarm rate (DIR@FAR). FAR=1 means we do not reject any probe. Note that, unlike the general case shown in [1], here we only consider the rank-1 identification rate for DIR. Therefore, DIR@FAR=1 is the rank-1 closed-set identification accuracy.
B. Classifier Initialization by Weight Imprinting
Due to the few-shot nature of the gallery set on which we fine-tune, the initialization of model parameters and, in particular, of the classifier weights is crucial to avoid overfitting. The most naive option is a random initialization of the classifier weight matrix W. Another commonly used strategy is linear probing [25], i.e., finding an optimized weight W that minimizes the classification loss over the frozen encoder embeddings φ(x).
We experimentally find that, as seen in Fig. 2, neither of these initialization schemes induces a discriminative structure for the encoder embedding φ(x). In particular, during fine-tuning, each weight vector w_c in the classifier acts as a center (or prototype) for the c-th class (i.e., identity). Fig. 2 shows that neither random initialization nor linear probing derives optimally discriminative weight vectors w_c, resulting in poor class separation of the gallery features.
Motivated by this issue, we propose to initialize by weight imprinting (WI), which induces the optimal discriminative quality for the gallery features:
    w_c = \tilde{w}_c / \|\tilde{w}_c\|_2,  where  \tilde{w}_c = \frac{1}{m} \sum_{y_i^G = c} \phi(x_i^G)    (1)
where \|\cdot\|_2 is the l2 norm, and the embedding feature φ(x) is unit-normalized such that \|\phi(x)\|_2 = 1.
As expected, Fig. 2 verifies that fine-tuning from the weight-imprinted initialization achieves a much higher discriminative quality. This shows the superiority of weight imprinting compared to random initialization and linear probing.
Note that weight imprinting has been frequently used in FR embedding models [8], [9]. However, the critical difference is that those models utilize weight imprinting only to prepare templates before evaluation. In our case, the WI initialization is utilized particularly for fine-tuning.
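To make the imprinting step concrete, Eq. (1) amounts to averaging the unit-normalized gallery embeddings of each identity and re-normalizing the result before copying it into the classifier. Below is a minimal PyTorch-style sketch of this initialization; the function and variable names are ours, and the snippet is illustrative rather than the authors' released implementation.

```python
import torch
import torch.nn.functional as F

@torch.no_grad()
def imprint_classifier(encoder, gallery_images, gallery_labels, num_classes):
    """Initialize classifier weights by weight imprinting (Eq. 1).

    gallery_images: (N, 3, 112, 112) tensor of few-shot gallery faces
    gallery_labels: (N,) tensor of identity indices in [0, num_classes)
    Returns a (num_classes, feat_dim) matrix whose c-th row is the
    l2-normalized mean of the normalized gallery embeddings of identity c.
    """
    feats = F.normalize(encoder(gallery_images), dim=1)        # ||phi(x)||_2 = 1
    weights = torch.zeros(num_classes, feats.shape[1],
                          device=feats.device, dtype=feats.dtype)
    for c in range(num_classes):
        weights[c] = feats[gallery_labels == c].mean(dim=0)    # w~_c = (1/m) * sum phi(x)
    return F.normalize(weights, dim=1)                         # w_c = w~_c / ||w~_c||_2
```

Starting fine-tuning from these imprinted prototypes means the very first gradient steps already operate on class centers rather than on random directions.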
C. BatchNorm-only Fine-Tuning
Choosing the appropriate layers to tune is another important issue for fine-tuning. Moreover, due to the extremely small number of samples for each gallery identity, there is a risk of overfitting, as suggested by the classical theory on the VC dimension [26]. In fact, a recent study [25] suggests that full fine-tuning hurts the pretrained filters, including the useful convolutional filters learned from a large-scale database.
To minimize the negative effect of this deterioration, we fine-tune only the BatchNorm (BN) layers along with the classifier weight:
    \min_{W,\,\theta_{BN}} \; L(W^T \phi_\theta(x), y),  with  \theta = [\theta_{BN}, \theta_{rest}]    (2)
where θ refers to all parameters in the encoder φ = φ_θ, and θ_BN and θ_rest respectively refer to the BatchNorm parameters and the rest. During fine-tuning, θ_rest is fixed with no gradient flow. The loss function L can be a softmax cross-entropy or a widely used FR embedding loss such as ArcFace [9] or CosFace [8].
Because only the BN layers (and the classifier weight) are fine-tuned, the convolutional filters learned from the large-scale pretraining database are transferred unchanged. The BN-only training is thus computationally efficient, as it updates only 0.01–0.1% of the total parameters in the CNN. Nevertheless, its model complexity is sufficient to learn a general image task, as guaranteed by [27].
Fig. 2. The intra-class variance (left) and inter-class separation (right) of classifiers that are initialized by different schemes. NormFace [23], CosFace [8], and ArcFace [9] losses are used for the linear probing initialization. The weight imprinting initialization does not require training and thus stays constant.
Fig. 3. An unknown feature u placed between gallery prototypes of class i and j. ϵ is some small positive constant.
TABLE I
Average angle (degrees) between IJB-C probe feature vectors and their top-k closest gallery prototypes. The third column refers to the average of top-2 to top-16.
Encoder  Probe  top-1   top-2   2∼16
Res50    K      50.7°   64.0°   69.1°
         U      63.8°   66.0°   69.7°
VGG19    K      53.4°   66.2°   71.4°
         U      65.9°   68.2°   72.1°
D. Neighborhood Aware Cosine Similarity
The cosine similarity function is the predominant matcher for contemporary face verification and identification. Denoting the probe feature vector as p and the gallery prototypes as \{g_j\}_{j=1}^{C}, where g_j := \frac{1}{m} \sum_{y_i^G = j} \phi(x_i^G) is the mean of the normalized gallery feature vectors of class j, identification is performed by finding the maximum class index c = \arg\max_{j=1,...,C} \cos(p, g_j). In the extension to OSFI, the decision of accepting a probe as known or rejecting it as unknown can be formulated as:
    \max_{j=1,...,C} \cos(p, g_j) \;\gtrless_{\text{Reject}}^{\text{Accept}}\; \tau    (3)
where \cos(p, q) = \frac{p}{\|p\|_2} \cdot \frac{q}{\|q\|_2} is the cosine similarity between two feature vectors and τ is the rejection threshold.
Now, consider the example illustrated in Fig. 3. The cosine matcher will assign the probe u to identity i with an acceptance score of 0.866, which is fairly close to the maximum score of 1. This value alone might imply that the probe is a known sample, as it is close to the gallery identity i. However, the probe feature vector is placed right in the middle of identities i and j. The in-between placement of u suggests that the probe may well be unknown and thus should be assigned a lower acceptance score.
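For illustration, the baseline decision rule of Eq. (3) and the situation of Fig. 3 can be reproduced in a few lines of NumPy. This is a hedged sketch with names of our own choosing, not code from the paper; it only shows why the raw top-1 cosine score is poorly calibrated for rejection.

```python
import numpy as np

def cosine_open_set_decision(probe, prototypes, tau):
    """Baseline matcher of Eq. (3): accept and identify if the best cosine
    similarity exceeds tau, otherwise reject the probe as unknown."""
    p = probe / np.linalg.norm(probe)
    g = prototypes / np.linalg.norm(prototypes, axis=1, keepdims=True)
    sims = g @ p                      # cos(p, g_j) for every gallery identity j
    best = int(np.argmax(sims))
    return (best, sims[best]) if sims[best] >= tau else (None, sims[best])

# Fig. 3 situation: a probe lying exactly between two prototypes, 30 degrees
# from each, still obtains a top-1 score of cos(30°) ≈ 0.866.
g_i = np.array([1.0, 0.0])
g_j = np.array([np.cos(np.radians(60)), np.sin(np.radians(60))])
u = np.array([np.cos(np.radians(30)), np.sin(np.radians(30))])
print(cosine_open_set_decision(u, np.stack([g_i, g_j]), tau=0.8))  # accepted with ~0.866
```

Even though this probe is equally far from both prototypes, the matcher accepts it with a near-maximal score; the NAC matcher introduced next suppresses exactly this case by normalizing the score over the probe's nearest gallery neighbors.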
Motivated by this intuition, we propose the Neighborhood Aware Cosine (NAC) matcher, which respects all top-k surrounding gallery prototypes:
    NAC(p, g_i) = \frac{\exp(\cos(p, g_i)) \cdot \mathbb{1}[i \in N_k]}{\sum_{j \in N_k} \exp(\cos(p, g_j))}    (4)
Fig. 4. The distributions of scores for known (K) and unknown (U) probes of the IJB-C dataset using cosine similarity (left) and NAC with k = 16 (right). The scores are min-max normalized and τ is set such that FAR=0.01 in both cases. DIR=48.05% (left) vs. DIR=54.53% (right). ResNet-50 was used as the encoder.
Here, N_k is the index set of the k gallery prototypes that are nearest to the probe feature p, and \mathbb{1} is the indicator function. The main goal of the NAC matcher is to improve unknown rejection. Table I shows that known probe features are much closer to their closest prototype than to the second-closest prototype, unlike unknown probes. By exploiting this phenomenon, the NAC matcher is able to assign a much smaller score to unknown probes, as shown in Fig. 4.
IV. EXPERIMENTS
A. Datasets
We use the VGGFace2 [4] dataset for pretraining the encoders, and CASIA-WebFace [28] and IJB-C [29] for evaluation. Using MTCNN [30], we align and crop every image to 112x112 with equal parameters for all datasets. For VGGFace2, we remove all identities overlapping with the evaluation datasets. The evaluation datasets are equally split into two groups such that the numbers of known and unknown identities are equal. Then we randomly choose m=3 images of the known identities to create the gallery (G), and the rest are known probes (K). All images of unknown identities are unknown probes (U). Table II summarizes the statistics of the datasets we use. Note that we chose every known identity to have more than 10 images such that there are at least 7 probe samples. Also note that the IJB-C dataset consists of still images and video frames (video frames typically have poorer image quality). We sample the gallery from still images and the probes from video frames, which makes this dataset much more challenging. We note that the protocol devised here can be regarded as an extension of that in [15].
TABLE II
Dataset statistics. The number inside the parentheses refers to the average number of images per identity. For evaluation datasets, known identities consist of the gallery (G) and known probe (K), where the gallery has 3 images per identity.
Pretrain             # IDs (images / ID)
VGGFace2             7,689 (354.0)
Evaluation           Known (G + K)        Unknown (U)
CASIA-WebFace        5,287 (3+20.0)       5,288 (16.5)
IJB-C                1,765 (3+15.3)       1,765 (13.9)
B. Baselines
1) Classifier Initialization: Along with Weight Imprinting (denoted WI), we report the results of using random initialization and linear probing initialization as described in Sec. III-B.
2) Encoder Layer Fine-Tuning: Along with BatchNorm-only fine-tuning (denoted BN), we explore tuning other layers of the encoder. The simplest option is tuning every layer (i.e., all parameters of the model), which we denote as full. The second is freezing the early layers and training only the deeper ones, which we denote as partial. We also consider the parallel residual adapter [31], which adds additional 1x1 convolutional filters to the original convolutional layers.
During fine-tuning, only these additional filters are trained to capture the subtle differences in the new dataset. Note that the authors in [31] apply this technique to ResNet [3], hence the name residual parallel adapter, but the idea can be applied to CNNs without residual connections as well, so we also apply it to a VGG-style network. We denote this configuration as PA, referring to Parallel Adapter.
3) Matcher: During OSFI evaluation, the vanilla cosine similarity matcher is adopted as the baseline matcher. When the NAC matcher is used, we denote it by NAC. For comparison, we also use the extreme value machine (EVM) proposed by [15]. We train the EVM on the gallery set with the best parameters found by the authors.
In summary, the classifier initialization methods we consider are {Random, Linear probing, WI}, the fine-tuning layer configurations are {Full, Partial, PA, BN}, and the matchers are {cos, EVM, NAC}. We test the OSFI performance of different combinations of these three components. Our proposed OSFI scheme is to use WI+BN+NAC jointly.
C. Training Details
We choose VGG19 [2] and ResNet-50 [3] as the encoders, with feature dimension 512. We pretrain these encoders on the VGGFace2 dataset with the CosFace loss (scale=32, margin=0.4) until convergence.
Then we fine-tune the encoder with different classifier initialization schemes and encoder layer configurations. When using the linear probing initialization, we train the classifier until the training accuracy reaches 95%.
We follow the encoder layer fine-tuning configurations in Sec. IV-B. For the partial fine-tuning, we only train the last 2 convolutional layers (Conv-BN-ReLU-Conv-BN-ReLU). Table III shows the number of total and updated parameters for each fine-tuning scheme.
TABLE III
The total number of parameters and the number of fine-tuned parameters for each encoder fine-tuning scheme. '+' refers to the number of added parameters for the parallel adapter.
# Params (million)       VGG19    Res50
Pretrained               32.88    43.58
Full fine-tuning         32.88    43.58
Partial fine-tuning      4.72     4.72
Parallel Adapter         +2.22    +3.39
BN-only fine-tuning      0.01     0.03
Fig. 5. The OSFI performance of cosine similarity and NAC with different values of k on the IJB-C dataset, using VGGNet-19 (left) and ResNet-50 (mid) as the encoder. The square markers refer to cosine similarity and the star marks the optimal k for different layer fine-tuning methods. To summarize the OSFI performance into a single number, we use the area under the curve (AUC, %) of the DIR@FAR curve. (Right) DIR@FAR curves of the Pretrained and BN configurations using cosine similarity and NAC (k=16) as the matcher. Numbers in the legend show the AUC values. When k = 1, NAC is replaced by cos.
We fix the number of epochs to 20 and the batch size to 128 for every method. We again use the CosFace loss for consistency. For the optimizer we use Adam [32] with cosine annealing. The initial learning rate is set to 1e-4 for full and PA, and 1e-3 for partial and BN, which we find to be the optimal learning rate for each method. For data augmentation, we use random horizontal flipping and random cropping with a random scale from 0.7 to 1.0.
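As a rough illustration of this training recipe, the augmentation pipeline and the BN-only optimizer could be set up as follows. PyTorch and torchvision are assumed, the helper names are ours, and details not stated in the text (such as tensor conversion) are filled in as placeholders.

```python
import torch
from torchvision import transforms

# Augmentation described above: random crop (scale 0.7-1.0) resized back to
# 112x112, plus random horizontal flip. Normalization statistics are not
# specified in the text and are therefore omitted here.
augment = transforms.Compose([
    transforms.RandomResizedCrop(112, scale=(0.7, 1.0)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])

def configure_bn_only_finetuning(encoder, classifier_weight, lr=1e-3, epochs=20):
    """Freeze everything except the BatchNorm affine parameters, then build an
    Adam optimizer with cosine annealing over those parameters and the
    (weight-imprinted) classifier, following the settings stated above."""
    for p in encoder.parameters():
        p.requires_grad_(False)
    bn_params = []
    for m in encoder.modules():
        if isinstance(m, torch.nn.BatchNorm2d):
            for p in m.parameters():
                p.requires_grad_(True)
                bn_params.append(p)
    optimizer = torch.optim.Adam(bn_params + [classifier_weight], lr=lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
    return optimizer, scheduler
```

Only the BatchNorm affine parameters and the classifier are passed to the optimizer, so all other encoder parameters stay fixed, in line with Eq. (2).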
The cropped images are resized to the original +size. +D. Optimal k for NAC +Since the gallery set is too small, we cannot afford a separate +validation set to individually optimize k for each dataset. +Instead, we attempt to find a global value that has optimal +performance regardless of the fine-tuning method, if one exists. +We first fine-tune the encoders with different layer con- +figurations, which gives us five different encoders includ- +ing one without any fine-tuning; pretrained, full, partial, +PA, and BN. Then we search the best parameter k for the +NAC matcher by grid search strategy, where the grid is +[2,4,8,16,32,128,256,512,1024,C], and C is the total number +of identities. Note that k = 1 refers to using cosine similarity +instead of NAC, which we added for comparison. Since a +single-value objective is preferred, we use the area under the +curve (AUC) of the DIR@FAR curve instead of DIR value +at different FAR values. We repeat this process with different +datasets and encoder architectures. +The results are shown in Fig. 5. We did not include +the results of CASIA-WebFace as it shows a similar trend. +Excluding k = 1 which is not NAC, the results show a smooth +unimodal curve with a peak at k = 16 or 32. This shows +that the NAC matcher indeed has a globally optimal k value +that is robust against different datasets, encoders, and fine-tune +methods. Thus we choose k = 16 (k = 32 also gives similar +results) as the global parameter throughout this paper. +Note that when k = C, NAC becomes equivalent to softmax +function with cosine similarity logits. However, this is notably +inferior compared to k = 16, which implies that considering +only the k-nearest is superior to considering every gallery +prototype. +E. Comparison of Fine-Tuning Methods +We compare the OSFI performances of the pretrained model +(non-fine-tuned) with six different combinations of classifier +initialization schemes and layer finetuning configurations: ran- +dom+full, linear probing+full, WI+full, WI+partial, WI+PA, +WI+BN. The matcher is fixed to cosine similarity. These +correspond to row 4-9 in Table IV. +First, to compare different classifier initialization schemes, +we fix the fine-tuning scheme to full. When using random +initialization, rejection accuracy (DIR@FAR=0.001,0.01,0.1) +and closed-set accuracy (DIR@FAR=1) severely drops. For +linear probing, rejection accuracy improves while closed- +set accuracy drops. Only WI clearly improves the encoder +performance, supporting the superiority of weight imprinting. +Now we fix the classifier initialization to WI and compare +different layer finetuning configurations. full clearly has the +worst performance. While PA is better than partial in closed- +set accuracy, partial clearly outperforms PA in rejection ac- +curacy. BN outperforms all others in closed-set accuracy with +a large margin but sometimes falls behind partial in rejection +accuracy. +With the aid of the NAC matcher, our method WI+BN+NAC +outperforms all other methods in every aspect. Compared +to original, this gains 4.60%, 8.11%, 4.57%, 1.68% higher +DIR in average with respect to FAR of 0.001, 0.01, 0.1, 1.0, +respectively. +F. Analysis on Discriminative Quality of Different Fine-tuning +Methods +How do different layer finetuning configurations affect the +final OSFI performance? To analyze this, we adopt three +different metrics; inter-class separation, intra-class variance, +and Davies-Bouldin Index (DBI) [33]. 
The definitions of the +first two metrics are identical to that of Fig. 2. DBI is a metric +for evaluating the clustering quality, where DBI ≈ 0 means +perfect clustering. We compute these metrics on the gallery +features after fine-tuning, and the results are shown in Table +V. +Here we can easily separate these configurations into two +groups: full and partial vs PA and BN. The first group has + +VGG19 +ResNet-50 +ResNet-50 +75.5 +0.8 +70.5 +75.0 +74.5 +0.6 +(%) +70.0 +74.0 +AUC +69.5 +0.4 +Pretrained +73.5 +Full +- +Pretrained+cos:72.36% +69.0 +Partial +73.0 +0.2 +Pretrained+nac:73.42% +PA +72.5 +Cosine +68.5 +WI+BN+cos: 75.02% +BN +★ +NAC (best) +72.0 +0.0 +WI+BN+nac: 75.41% +2 +8 +16 +32 +64 +128 256512 1024C +2 +8 +16 +32 +64 +128 256512 1024C +0.0001 +0.0010 +0.0100 +0.1000 +1.0000 +k +k +False Alarm RateTABLE IV +DIR@FAR OF DIFFERENT METHODS ON CASIA-WEBFACE DATASET AND IJB-C DATASET, USING VGGNET-19 AND RESNET-50 AS THE ENCODER. +DIR@FAR=1 (100%) IS THE CLOSED-SET ACCURACY. THE HIGHEST VALUE IN EACH COLUMN IS MARKED IN BOLD. FOR THE FIRST THREE ROWS THE +ENCODER IS NOT FINE-TUNED AND ONLY THE MATCHERS ARE CHANGED. THE LAST ROW (WI+BN+NAC) IS OUR PROPOSED METHOD. +Encoder +Method +CASIA-WebFace +IJB-C +Classifier +initialization +Fine-tuning +layers +Matcher +DIR @ FAR (%) +DIR @ FAR (%) +0.1 +1.0 +10.0 +100.0 +0.1 +1.0 +10.0 +100.0 +VGG19 +None +None +cos +25.23 +52.97 +70.07 +80.89 +28.35 +45.55 +61.71 +73.80 +None +None +EVM +37.57 +57.75 +71.03 +80.78 +35.03 +53.64 +63.34 +73.70 +None +None +NAC +25.15 +55.68 +71.41 +80.89 +36.73 +51.92 +64.27 +73.80 +Random +Full +cos +23.95 +43.19 +59.03 +70.94 +17.18 +32.62 +46.90 +60.23 +Linear probing +Full +cos +28.82 +55.64 +70.44 +79.84 +30.80 +45.91 +59.63 +70.09 +WI +Full +cos +27.63 +57.58 +72.02 +80.94 +35.49 +50.52 +63.56 +73.53 +WI +Partial +cos +28.91 +57.31 +72.29 +81.16 +34.81 +51.98 +64.53 +73.89 +WI +PA +cos +26.29 +57.90 +72.82 +81.82 +31.74 +50.21 +64.26 +74.50 +WI +BN +cos +25.39 +56.65 +72.54 +82.14 +32.19 +48.74 +63.87 +74.43 +WI +BN +NAC +25.94 +58.01 +72.92 +82.14 +38.09 +53.08 +65.30 +74.43 +Res50 +None +None +cos +23.85 +58.06 +74.15 +83.69 +32.11 +48.05 +65.31 +76.96 +None +None +EVM +39.44 +61.61 +75.02 +83.57 +38.12 +38.12 +66.81 +76.96 +None +None +NAC +21.24 +60.23 +75.31 +83.69 +36.67 +54.53 +68.14 +76.96 +Random +Full +cos +25.31 +45.43 +60.80 +72.44 +14.88 +32.05 +49.39 +61.88 +Linear probing +Full +cos +28.35 +60.11 +74.63 +82.73 +30.35 +46.42 +61.90 +72.34 +WI +Full +cos +26.73 +63.92 +77.49 +84.65 +39.05 +56.00 +67.83 +76.94 +WI +Partial +cos +25.98 +64.66 +78.07 +85.02 +44.31 +57.11 +69.13 +77.49 +WI +PA +cos +24.89 +63.85 +77.58 +85.01 +36.69 +54.86 +68.30 +77.63 +WI +BN +cos +25.70 +65.83 +79.66 +86.73 +40.29 +55.71 +69.29 +78.74 +WI +BN +NAC +23.65 +67.72 +80.34 +86.73 +40.25 +58.25 +70.40 +78.74 +TABLE V +INTER-CLASS SEPARATION, INTRA-CLASS VARIANCE, DBI, AND AUC +GAIN BY USING NAC (REFER TO FIG. 5) FOR EACH LAYER FINETUNING +CONFIGURATION. THESE VALUES ARE AVERAGED ACROSS DATASETS AND +ENCODER ARCHITECTURES. ↑ MEANS THAT LARGER QUANTITY IS BETTER +AND VICE VERSA. +Inter (↑) +Intra (↓) +DBI (↓) +∆AUC (↑) +Pretrained Model +106.3◦ +34.5◦ +1.52 +0.740 +Full finetuning +106.7◦ +24.2◦ +0.87 +0.025 +Partial finetuning +106.4◦ +24.5◦ +0.90 +0.058 +Parallel Adapter +107.0◦ +31.8◦ +1.32 +0.135 +BN-only finetuning +107.3◦ +33.6◦ +1.46 +0.335 +similar inter-class separation with Pretrained and significantly +smaller intra-class variance, which leads to small DBI. 
This is +in stark contrast with the second group. +With this observation, we can conjecture the different opti- +mization strategies of each group. The first group was able to +easily reduce the training loss by collapsing the gallery fea- +tures into a single direction (shown by the small angle between +intra-class features). This was possible because both full and +partial directly updated the parameters of the convolutional +filters. On the other hand, all convolutional filters were frozen +for both PA and BN. This constraint may have prevented these +methods from taking the shortcut, i.e. simply collapsing the +gallery features, and instead led to separating the embeddings +of different identities. This explains why PA and BN have +higher closed-set accuracy. +This can also explain the AUC gain (∆AUC) when using +NAC instead of cosine similarity. Features become redundant +when they collapse, and so does the prototype. Therefore the +information from neighboring prototypes becomes less helpful +in rejecting unknown samples, leading to the marginal gain +from using NAC. This is why full and partial do not benefit +from using NAC matcher. +Fig. 6. The performance of our method against the baseline w.r.t. different +gallery size. AUC of DIR@FAR curve is used as the performance measure. +G. Performance with respect to Different Gallery Size +Fig. 6 shows the OSFI performance of our method against +the baseline (pretrained encoder with cos matcher) with respect +to different gallery size. We can see that our method consis- +tently improves upon the baseline, except for the extreme case +where only one image is provided for each identity. +V. CONCLUSION AND FUTURE WORKS +In this work we showed that combining weight-imprinted +classifier and BatchNorm-only tuning of the encoder effec- +tively improves the encoder’s OSFI performance without suf- +fering from overfitting. We further facilitated the performance +by our novel NAC matcher instead of the commonly used +cosine similarity. Future works will explore extending this idea +to the open-set few-shot recognition of generic images. +Acknowledgements: +This work was supported by the National Research Foundation +of Korea (NRF) grant funded by the Korea government (MSIP) +(NO. NRF-2022R1A2C1010710) + +IJB-C, ResNet-50 +CASIA-WebFace.ResNet-50 +80 +Pretrained +90 +Pretrained +Ours +Ours +AUC(%) +80 +70 +70 +60 +60 +50 +50 +2 +NumberofImagesperGalleryIdentity +NumberofImagesperGalleryIdentityREFERENCES +[1] A. K. Jain and S. Z. Li, Handbook of face recognition. +Springer, 2011, +vol. 1. +[2] K. Simonyan and A. Zisserman, “Very deep convolutional networks for +large-scale image recognition,” arXiv preprint arXiv:1409.1556, 2014. +[3] K. He, X. Zhang, S. Ren, and J. Sun, “Deep residual learning for image +recognition,” in Proceedings of the IEEE conference on computer vision +and pattern recognition, 2016, pp. 770–778. +[4] Q. Cao, L. Shen, W. Xie, O. M. Parkhi, and A. Zisserman, “Vggface2: +A dataset for recognising faces across pose and age,” in 2018 13th IEEE +international conference on automatic face & gesture recognition (FG +2018). +IEEE, 2018, pp. 67–74. +[5] Y. Guo, L. Zhang, Y. Hu, X. He, and J. Gao, “Ms-celeb-1m: A dataset +and benchmark for large-scale face recognition,” in European conference +on computer vision. +Springer, 2016, pp. 87–102. +[6] F. Schroff, D. Kalenichenko, and J. 
Philbin, “Facenet: A unified embed- +ding for face recognition and clustering,” in Proceedings of the IEEE +conference on computer vision and pattern recognition, 2015, pp. 815– +823. +[7] W. Liu, Y. Wen, Z. Yu, M. Li, B. Raj, and L. Song, “Sphereface: Deep +hypersphere embedding for face recognition,” in Proceedings of the +IEEE conference on computer vision and pattern recognition, 2017, pp. +212–220. +[8] H. Wang, Y. Wang, Z. Zhou, X. Ji, D. Gong, J. Zhou, Z. Li, and +W. Liu, “Cosface: Large margin cosine loss for deep face recognition,” +in Proceedings of the IEEE conference on computer vision and pattern +recognition, 2018, pp. 5265–5274. +[9] J. Deng, J. Guo, N. Xue, and S. Zafeiriou, “Arcface: Additive angular +margin loss for deep face recognition,” in Proceedings of the IEEE/CVF +Conference on Computer Vision and Pattern Recognition, 2019, pp. +4690–4699. +[10] X. Wang, S. Zhang, S. Wang, T. Fu, H. Shi, and T. Mei, “Mis-classified +vector guided softmax loss for face recognition,” in Proceedings of the +AAAI Conference on Artificial Intelligence, vol. 34, no. 07, 2020, pp. +12 241–12 248. +[11] Q. Meng, S. Zhao, Z. Huang, and F. Zhou, “Magface: A universal repre- +sentation for face recognition and quality assessment,” in Proceedings of +the IEEE/CVF Conference on Computer Vision and Pattern Recognition, +2021, pp. 14 225–14 234. +[12] H. Qi, M. Brown, and D. G. Lowe, “Low-shot learning with imprinted +weights,” in Proceedings of the IEEE conference on computer vision +and pattern recognition, 2018, pp. 5822–5830. +[13] S. Ioffe and C. Szegedy, “Batch normalization: Accelerating deep +network training by reducing internal covariate shift,” in International +conference on machine learning. +PMLR, 2015, pp. 448–456. +[14] F. Li and H. Wechsler, “Open set face recognition using transduction,” +IEEE transactions on pattern analysis and machine intelligence, vol. 27, +no. 11, pp. 1686–1697, 2005. +[15] M. Gunther, S. Cruz, E. M. Rudd, and T. E. Boult, “Toward open-set +face recognition,” in Proceedings of the IEEE Conference on Computer +Vision and Pattern Recognition Workshops, 2017, pp. 71–80. +[16] E. M. Rudd, L. P. Jain, W. J. Scheirer, and T. E. Boult, “The extreme +value machine,” IEEE transactions on pattern analysis and machine +intelligence, vol. 40, no. 3, pp. 762–768, 2017. +[17] R. Vareto, S. Silva, F. Costa, and W. R. Schwartz, “Towards open-set +face recognition using hashing functions,” in 2017 IEEE international +joint conference on biometrics (IJCB). +IEEE, 2017, pp. 634–641. +[18] B. Kulis and K. Grauman, “Kernelized locality-sensitive hashing,” IEEE +Transactions on Pattern Analysis and Machine Intelligence, vol. 34, +no. 6, pp. 1092–1104, 2011. +[19] G. Mateos-Aparicio, “Partial least squares (pls) methods: Origins, evo- +lution, and application to social sciences,” Communications in Statistics- +Theory and Methods, vol. 40, no. 13, pp. 2305–2317, 2011. +[20] H. Dao, D.-H. Nguyen, and M.-T. Tran, “Face recognition in the wild +for secure authentication with open set approach,” in International +Conference on Future Data and Security Engineering. +Springer, 2021, +pp. 338–355. +[21] A. Bendale and T. E. Boult, “Towards open set deep networks,” in +Proceedings of the IEEE conference on computer vision and pattern +recognition, 2016, pp. 1563–1572. +[22] D.-W. Zhou, H.-J. Ye, and D.-C. Zhan, “Learning placeholders for +open-set recognition,” in Proceedings of the IEEE/CVF Conference on +Computer Vision and Pattern Recognition, 2021, pp. 4401–4410. +[23] F. Wang, X. 
Xiang, J. Cheng, and A. L. Yuille, “Normface: L2 +hypersphere embedding for face verification,” in Proceedings of the 25th +ACM international conference on Multimedia, 2017, pp. 1041–1049. +[24] I. Masi, Y. Wu, T. Hassner, and P. Natarajan, “Deep face recognition: A +survey,” in 2018 31st SIBGRAPI conference on graphics, patterns and +images (SIBGRAPI). +IEEE, 2018, pp. 471–478. +[25] Anonymous, “Fine-tuning distorts pretrained features and underperforms +out-of-distribution,” in Submitted to The Tenth International Conference +on Learning Representations, 2022, under review. [Online]. Available: +https://openreview.net/forum?id=UYneFzXSJWh +[26] V. N. Vapnik and A. Y. Chervonenkis, “On the uniform convergence +of relative frequencies of events to their probabilities,” in Measures of +complexity. +Springer, 2015, pp. 11–30. +[27] J. Frankle, D. J. Schwab, and A. S. Morcos, “Training batchnorm and +only batchnorm: On the expressive power of random features in cnns,” +arXiv preprint arXiv:2003.00152, 2020. +[28] D. Yi, Z. Lei, S. Liao, and S. Z. Li, “Learning face representation from +scratch,” arXiv preprint arXiv:1411.7923, 2014. +[29] B. Maze, J. Adams, J. A. Duncan, N. Kalka, T. Miller, C. Otto, +A. K. Jain, W. T. Niggel, J. Anderson, J. Cheney et al., “Iarpa +janus benchmark-c: Face dataset and protocol,” in 2018 International +Conference on Biometrics (ICB). +IEEE, 2018, pp. 158–165. +[30] K. Zhang, Z. Zhang, Z. Li, and Y. Qiao, “Joint face detection and +alignment using multitask cascaded convolutional networks,” IEEE +Signal Processing Letters, vol. 23, no. 10, pp. 1499–1503, 2016. +[31] S.-A. Rebuffi, H. Bilen, and A. Vedaldi, “Efficient parametrization of +multi-domain deep neural networks,” 2018. +[32] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” +arXiv preprint arXiv:1412.6980, 2014. +[33] D. L. Davies and D. W. Bouldin, “A cluster separation measure,” IEEE +transactions on pattern analysis and machine intelligence, no. 2, pp. +224–227, 1979. + diff --git a/4dAzT4oBgHgl3EQf9f77/content/tmp_files/load_file.txt b/4dAzT4oBgHgl3EQf9f77/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..def5f887250bdfaac16f9f27b54ee5048d00f61a --- /dev/null +++ b/4dAzT4oBgHgl3EQf9f77/content/tmp_files/load_file.txt @@ -0,0 +1,831 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf,len=830 +page_content='Open-Set Face Identification on Few-Shot Gallery by Fine-Tuning Hojin Park, Jaewoo Park, and Andrew Beng Jin Teoh School of Electrical and Electronics Engineering College of Engineering, Yonsei University Seoul, Korea 2014142100@yonsei.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='kr, julypraise@gmail.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='com, bjteoh@yonsei.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='kr Abstract—In this paper, we focus on addressing the open- set face identification problem on a few-shot gallery by fine- tuning.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' The problem assumes a realistic scenario for face iden- tification, where only a small number of face images is given for enrollment and any unknown identity must be rejected during identification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' We observe that face recognition models pretrained on a large dataset and naively fine-tuned models perform poorly for this task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Motivated by this issue, we propose an effective fine-tuning scheme with classifier weight imprinting and exclusive BatchNorm layer tuning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' For further improvement of rejection accuracy on unknown identities, we propose a novel matcher called Neighborhood Aware Cosine (NAC) that computes similarity based on neighborhood information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' We validate the effectiveness of the proposed schemes thoroughly on large-scale face benchmarks across different convolutional neural network architectures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' The source code for this project is available at: https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='com/1ho0jin1/OSFI-by-FineTuning I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' INTRODUCTION Recently face recognition (FR) has achieved astonishing success attributed to three factors in large.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Deep convolutional neural network (CNN) architectures [2], [3] that have strong visual prior were developed and leveraged as FR embedding models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Large-scale face datasets [4], [5] that cover massive identities with diverse ethnicity and facial variations became available.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' On top of these, various metric learning losses [6]–[9] elevated the performance of deep FR models to an unprecedented level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' The majority of FR embedding models have been evaluated on numerous benchmarks with closed-set identification [7]– [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' The closed-set identification protocol assumes all probe identities present in the gallery.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' However, in a realistic sce- nario, an unknown identity that is not enrolled may be en- countered.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Another important but practical aspect to consider is the scarcity of intra-class samples for the gallery identities to be registered;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' namely, due to the expensive data acquisition cost and privacy issue, only a very small number of samples might be available for each gallery identity to register.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' In this respect, open-set face identification (OSFI) with the small- sized gallery is closer to a real scenario as it needs to perform both known probe identity identification and unknown probe identity rejection based on the limited information from the small gallery set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Despite its versatile practical significance, however, OSFI with a small gallery has been rarely explored.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Devising a model specific to OSFI with a small gallery can be challenging in the following aspects: Firstly, an OSFI (a) (b) Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' (a) Full fine-tuning all parameters severely degrades the OSFI performance, while our method significantly improves the pre-trained model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Detection & Identification Rate (DIR) [1] quantifies both correct identification of the known probe identities and detection of the unknown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' (b) An outline of our proposed fine-tuning scheme: Given a model pretrained on a large-scale face database, we initialize the gallery set classifier by weight imprinting, and then fine-tune the model on a few-shot gallery set by training only the BatchNorm layers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' In the evaluation stage, a given probe is either accepted as known or rejected as an unknown identity based on novel similarity matcher dubbed Neighborhood Aware Cosine (NAC) matcher.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' model performs both identifications of a known probe identity but also correct rejection of unknown probe identity.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Hence, conventional FR embedding models devised mainly for closed- set identification can perform poorly at the rejection of the unknown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' In fact, as observed in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 1 (a), FR embedding models pretrained on a large-scale public face database are not effective for open-set identification, leaving room for improvement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' This suggests the need for fitting the pretrained arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='01922v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='CV] 5 Jan 2023 IJB-C CASIA-WebFace 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='9 Rate Pretrained Pretrained 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='7 Full finetuning 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='8 Full finetuning &Identification Ours Ours 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='6 etection 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='01 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='01 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='0 FalseAlarmRate FalseAlarmRatePretrain Set Evaluation Set (disjoint from pretrain set) Known Unknown Gallery (Few-shot) Known Query Unknown Query Weight Imprinting Probe Accept M NAC 2Q Reject Evaluation Pretraining BatchNorm-only Fine-Tuningmodel to be more specific to the given gallery set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Secondly, due to the few-shot nature of the small-sized gallery set, there is a high risk of overfitting for fine-tuning the pretrained model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' As shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 1 (a), full fine-tuning (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' updating all parameters) of the pretrained model results in severe performance degradation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' This drives us to devise an overfitting-resilient parameter tuning scheme.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Moreover, an ordinary cosine similarity matcher used in the closed-set identification might have a large tradeoff between the known probe identity identification and unknown probe identity rejection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' As will be observed in Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' III-D, the simple cosine matcher has a severe drawback for the task at hand.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' This motivates us to devise a robust matcher for OSFI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Based on these observations, we propose an efficient fine- tuning scheme and a novel similarity-based matcher for OSFI constrained on a small gallery set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Our fine-tuning scheme consists of weight initialization of the classifier governed by weight imprinting (WI) [12] and training only BatchNorm (BN) layers [13] for overfitting-resilient adaptation on the small gallery set.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Moreover, for both effective detection of the unknown and identification of the known probe identities, a novel Neighborhood Aware Cosine (NAC) matcher that respects the neighborhood information of the learned gallery features, and hence better calibrates the rejection score is proposed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Our contributions are summarized as follows: 1) To effectively solve the OSFI problem constrained on a small gallery set, we propose to fine-tune the pretrained face embedding model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Since full fine-tuning deterio- rates the embedding quality, we search for the optimal method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 2) We demonstrate that the combination of weight imprint- ing and exclusive BatchNorm layer fine-tuning excels other baselines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 3) We recognize that the commonly used cosine similarity is a sub-optimal matcher for rejection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' We propose a novel matcher named NAC that significantly improves the rejection accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' RELATED WORKS A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Open Set Face Identification (OSFI) [14], one of the earliest works in OSFI, used their proposed Open-set TCM-kNN on top of features extracted by PCA and Fisher Linear Discriminant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' [15] proposed their own OSFI protocol and showed that an extreme value machine [16] trained on the gallery set performs better than using cosine similarity or linear discriminant analysis for matchers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' [17] trained a matcher composed of locality sensitive hashing [18] and partial least squares [19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' [20] applied OpenMax [21] and PROSER [22], two methods for open-set recognition of generic images, on top of extracted face features.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' All previous works propose to train an open-set classifier (matcher) of some form, but all of them use a fixed encoder.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' To the best of our knowledge, we are the first to propose an effective fine-tuning scheme as a solution to OSFI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Cosine Similarity-based Loss Functions [23] proposed to l2-normalize the features such that the train loss is only determined by the angle between the feature and the classifier weights.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' [7] further extended this idea by applying a multiplicative margin to the angle between a feature and its corresponding weight vector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' This penalized the intra- class features to be gathered while forcing inter-class centers (prototypes) to be separated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' A number of follow-up papers such as [8]–[11] modify this angular margin term in different ways, but their motivations and properties are generally simi- lar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Therefore, in our experiments we only use CosFace loss [8] as a representative method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' For comprehensive understanding of these loss functions, refer to [24].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' APPROACH Our proposed approach is two-fold: fine-tuning on the gallery and open-set identification evaluation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' In the fine- tuning stage, the classifier is initialized by weight imprinting to initiate learning from optimal discriminative features, and the model is fine-tuned by updating only the BatchNorm layers to avoid overfitting on the few-shot gallery data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' In evaluation, we utilize a novel matcher NAC that computes a neighborhood aware similarity for better-calibrated rejection of the unknown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' We demonstrate that the combination of these three methods significantly outperforms all other baselines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Problem Definition and Metrics Formally, in an OSFI problem, we assume the availability of an encoder φ pretrained on a large-scale face database (FR embedding model), which is disjoint from the evaluation set with respect to identity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' The evaluation set consists of a gallery G = {(xG i , yG i )}Cm i=1 and a probe set Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' The probe set Q is further divided into the known probe set K = {(xK i , yK i )} and the unknown probe set U = {(xU i , yU i )}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' G and K has no overlapping images x but shares same identities y ∈ {1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=', C} whereas U has disjoint identities, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=', YU ∩ {1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=', C} = Ø.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' m refers to the number of images per identity in G, which we fix to 3 to satisfy the few-shot constraint.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' We allow the encoder to be fine-tuned over the gallery set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' The evaluation of OSFI performance uses the detection and identification rate at some false alarm rate (DIR@FAR).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' FAR=1 means we do not reject any probe.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Note that unlike the general case shown in [1], here we only consider rank- 1 identification rate for DIR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Therefore, DIR@FAR=1 is the rank-1 closed-set identification accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' B.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Classifier Initialization by Weight Imprinting Due to the few-shot nature of the gallery set where we fine-tune on, the initialization of model parameters and, in particular, of classifier weights is crucial to avoid overfitting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' The most naive option is a random initialization of the classifier weight matrix W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Another commonly used strategy is linear probing [25], i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=', finding an optimized weight W that minimizes the classification loss over the frozen encoder embeddings φ(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' We experimentally find that, as seen in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 2, both of these initialization schemes do not induce discriminative structure for the encoder embedding φ(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' In particular, during fine- tuning, each weight vector wc in the classifier acts as a center (or prototype) for the c-th class (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' identity).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 2 shows that neither random initialization nor linear probing of wc derives optimally discriminative weight vectors wc, resulting in low quality of class separation of gallery features.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Motivated from this issue, we propose to initialize by weight imprinting (WI), which induces the optimal discriminative quality for the gallery features: wc = � wc ∥� wc∥2 , � wc = 1 m � yG i =c φ(xG i ) (1) where ∥·∥2 is the l2 norm, and the embedding feature φ(x) is unit-normalized such that ∥φ(x)∥2 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' As expected, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 2 verifies that fine-tuning from the weight imprinted initialization achieves a much higher discriminative quality.' 
This shows the superiority of weight imprinting compared to random initialization and linear probing. Note that weight imprinting has frequently been used in FR embedding models [8], [9]. However, the critical difference is that those models utilize weight imprinting only to prepare templates before evaluation; in our case, the WI initialization is utilized specifically for fine-tuning.

C. BatchNorm-only Fine-Tuning

Choosing the appropriate layers to tune is another important issue for fine-tuning. Moreover, due to the extremely small number of samples for each gallery identity, there is a risk of overfitting, as suggested by the classical theory on the VC dimension [26]. In fact, a recent study [25] suggests that full fine-tuning hurts the pretrained filters, including the useful convolutional filters learned from a large-scale database. To minimize this deterioration, we fine-tune only the BatchNorm (BN) layers along with the classifier weight:

    min_{W, θ_BN} L(W^T φ_θ(x), y),    θ = [θ_BN, θ_rest]    (2)

where θ refers to all parameters of the encoder φ = φ_θ, and θ_BN and θ_rest refer to the BatchNorm parameters and the rest, respectively. During fine-tuning, θ_rest is fixed with no gradient flow. The loss function L can be a softmax cross-entropy or a widely used FR embedding loss such as ArcFace [9] or CosFace [8]. Because only the BN layers (and the classifier weight) are fine-tuned, the convolutional filters learned from the large-scale pretraining database are simply transferred. BN-only training is also computationally efficient, as it updates only 0.1-0.01% of the total parameters of the CNN.
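As a sketch of how Eq. (2) might be set up in PyTorch: freeze everything in the encoder except the BatchNorm affine parameters, and keep the (weight-imprinted) classifier trainable. Helper and variable names are illustrative.

```python
import torch.nn as nn

def bn_only_parameters(encoder, classifier):
    """Return the trainable parameters for BN-only fine-tuning: BatchNorm affine
    parameters (theta_BN) plus the classifier weight W; theta_rest stays frozen."""
    for p in encoder.parameters():
        p.requires_grad = False
    trainable = list(classifier.parameters())
    for m in encoder.modules():
        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
            for p in m.parameters():           # gamma (weight) and beta (bias)
                p.requires_grad = True
                trainable.append(p)
    return trainable                           # pass these to the optimizer
```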
Nevertheless, its model complexity is sufficient to learn a general image task, as guaranteed by [27].

Fig. 2. The intra-class variance (left) and inter-class separation (right) of classifiers initialized by different schemes. NormFace [23], CosFace [8] and ArcFace [9] losses are used for the linear probing initialization. The weight imprinting initialization does not require training and thus stays constant.

Fig. 3. An unknown feature u placed between gallery prototypes of class i and j (at 30° from g_i and 30°+ε from g_j). ε is some small positive constant.

TABLE I
Average angle (degrees) between IJB-C probe feature vectors and their top-k closest gallery prototypes. The third column refers to the average of top-2 to top-16.

    Encoder | Probe | top-1 | top-2 | 2~16
    Res50   | K     | 50.7° | 64.0° | 69.1°
    Res50   | U     | 63.8° | 66.0° | 69.7°
    VGG19   | K     | 53.4° | 66.2° | 71.4°
    VGG19   | U     | 65.9° | 68.2° | 72.1°
D. Neighborhood Aware Cosine Similarity

The cosine similarity function is the predominant matcher for contemporary face verification and identification. Denoting the probe feature vector as p and the gallery prototypes as {g_j}_{j=1}^C, where g_j := (1/m) Σ_{y_i^G = j} φ(x_i^G) is the mean of all normalized gallery feature vectors of class j, identification is performed by finding the maximum class index c = argmax_{j=1,...,C} cos(p, g_j). In the extension to OSFI, the decision of accepting a probe as known or rejecting it as unknown can be formulated as:

    max_{j=1,...,C} cos(p, g_j)  ≷_Reject^Accept  τ    (3)

where cos(p, q) = (p/‖p‖_2) · (q/‖q‖_2) is the cosine similarity between two feature vectors and τ is the rejection threshold. Now consider the example illustrated in Fig. 3. The cosine matcher will assign the probe u to identity i with acceptance score 0.866, which is fairly close to the maximum score of 1. This value alone might imply that the probe is a known sample, as it is close to the gallery identity i.
However, the probe feature vector is placed right in the middle of identities i and j. This in-between placement of u suggests that the probe may well be unknown and should therefore receive a lower acceptance score. Motivated by this intuition, we propose the Neighborhood Aware Cosine (NAC) matcher, which takes all top-k surrounding gallery features into account:

    NAC(p, g_i) = exp(cos(p, g_i)) · 1[i ∈ N_k] / Σ_{j ∈ N_k} exp(cos(p, g_j))    (4)

Fig. 4. The distributions of scores for known (K) and unknown (U) probes of the IJB-C dataset using cosine similarity (left) and NAC with k = 16 (right). The scores are min-max normalized and τ is set such that FAR=0.01 in both cases. DIR=48.05% (left) vs. DIR=54.53% (right). ResNet-50 was used as the encoder.

Here, N_k is the index set of the k gallery prototypes nearest to the probe feature p, and 1[·] is the indicator function. The main goal of the NAC matcher is to improve unknown rejection.
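A NumPy sketch of the NAC matcher in Eq. (4) combined with the accept/reject rule of Eq. (3); the function names and the explicit unit-normalization step are illustrative choices of ours.

```python
import numpy as np

def nac_scores(p, prototypes, k=16):
    """Softmax over cosine similarities restricted to the k nearest gallery
    prototypes N_k; prototypes outside the neighborhood get score 0 (Eq. 4)."""
    G = prototypes / np.linalg.norm(prototypes, axis=1, keepdims=True)
    cos = G @ (p / np.linalg.norm(p))            # cosine similarity to every prototype
    nk = np.argsort(-cos)[:k]                    # index set N_k
    scores = np.zeros_like(cos)
    scores[nk] = np.exp(cos[nk]) / np.exp(cos[nk]).sum()
    return scores

def identify_or_reject(p, prototypes, tau, k=16):
    scores = nac_scores(p, prototypes, k)
    c = int(scores.argmax())
    return (c if scores[c] >= tau else None), scores[c]   # None = rejected as unknown
```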
Table I shows that, unlike unknown probes, known probe features are much closer to their closest prototype than to their second-closest prototype. By exploiting this phenomenon, the NAC matcher is able to assign a much smaller score to unknown probes, as shown in Fig. 4.

IV. EXPERIMENTS

A. Datasets

We use the VGGFace2 [4] dataset for pretraining the encoders, and CASIA-WebFace [28] and IJB-C [29] for evaluation. Using MTCNN [30], we align and crop every image to 112x112 with the same parameters for all datasets. For VGGFace2, we remove all identities overlapping with the evaluation datasets. Each evaluation dataset is split into two groups such that the numbers of known and unknown identities are equal. We then randomly choose m=3 images of each known identity to create the gallery (G), and the rest become known probes (K). All images of unknown identities are unknown probes (U). Table II summarizes the statistics of the datasets we use. Note that we choose every known identity to have more than 10 images so that there are at least 7 probe samples per identity. Also note that the IJB-C dataset consists of still images and video frames (video frames typically have poorer image quality); we sample the gallery from still images and the probes from video frames, which makes this dataset much more challenging. The protocol devised here can be regarded as an extension of that in [15].
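A rough sketch of how the gallery / known-probe / unknown-probe split described above could be constructed; the data layout (a list of (path, identity) pairs) and all names are assumptions for illustration only.

```python
import random
from collections import defaultdict

def build_osfi_split(samples, m=3, min_images=10, seed=0):
    """samples: list of (image_path, identity) pairs from an evaluation dataset."""
    by_id = defaultdict(list)
    for path, ident in samples:
        by_id[ident].append(path)
    ids = sorted(by_id)
    rng = random.Random(seed)
    rng.shuffle(ids)
    # known identities need enough images for m gallery shots plus several probes;
    # half of the identities become known, the other half unknown
    eligible = [i for i in ids if len(by_id[i]) >= min_images]
    known_ids = set(eligible[: len(ids) // 2])
    gallery, known_probes, unknown_probes = [], [], []
    for ident in ids:
        imgs = by_id[ident][:]
        if ident in known_ids:
            rng.shuffle(imgs)
            gallery += [(p, ident) for p in imgs[:m]]
            known_probes += [(p, ident) for p in imgs[m:]]
        else:
            unknown_probes += [(p, ident) for p in imgs]
    return gallery, known_probes, unknown_probes
```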
B. Baselines

1) Classifier Initialization: Along with Weight Imprinting (denoted WI), we report the results of random initialization and linear probing initialization as described in Sec. III-B.

2) Encoder Layer Fine-Tuning: Along with BatchNorm-only fine-tuning (denoted BN), we explore tuning other layers of the encoder. The simplest option is tuning every layer (i.e., all parameters of the model), which we denote as full. The second is freezing the early layers and training only the deeper ones, which we denote as partial. We also consider the parallel residual adapter [31], which adds additional 1x1 convolutional filters in parallel to the original convolutional layers. During fine-tuning, only these additional filters are trained, to capture the subtle differences in the new dataset. Note that the authors of [31] apply this technique to ResNet [3], hence the name parallel residual adapter; the idea applies equally to CNNs without residual connections, so we also apply it to a VGG-style network. We denote this configuration as PA, referring to Parallel Adapter.

TABLE II
Dataset statistics. The number inside the parentheses refers to the average number of images per identity. For evaluation datasets, known identities consist of the gallery (G) and known probes (K), where the gallery has 3 images per identity.

    Pretrain        | # IDs (images / ID)
    VGGFace2        | 7,689 (354.0)

    Evaluation      | Known (G + K)   | Unknown (U)
    CASIA-WebFace   | 5,287 (3+20.0)  | 5,288 (16.5)
    IJB-C           | 1,765 (3+15.3)  | 1,765 (13.9)

TABLE III
The total number of parameters and the number of fine-tuned parameters for each encoder fine-tuning scheme. '+' refers to the number of parameters added by the parallel adapter.

    # Params (million)   | VGG19 | Res50
    Pretrained           | 32.88 | 43.58
    Full fine-tuning     | 32.88 | 43.58
    Partial fine-tuning  | 4.72  | 4.72
    Parallel Adapter     | +2.22 | +3.39
    BN-only fine-tuning  | 0.01  | 0.03

3) Matcher: During OSFI evaluation, the vanilla cosine similarity matcher is adopted as the baseline matcher; when the NAC matcher is used, we denote it by NAC. For comparison, we also use the extreme value machine (EVM) proposed in [15]. We train the EVM on the gallery set with the best parameters found by the authors.
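For intuition, a minimal PyTorch sketch of the parallel adapter idea behind the PA baseline: a trainable 1x1 convolution added in parallel to a frozen pretrained convolution. This is a simplified rendering in the spirit of [31], not the exact adapter architecture used there.

```python
import torch.nn as nn

class ParallelAdapter(nn.Module):
    """Frozen pretrained conv + trainable 1x1 conv computed in parallel and summed."""
    def __init__(self, conv: nn.Conv2d):
        super().__init__()
        self.conv = conv
        for p in self.conv.parameters():
            p.requires_grad = False            # keep the pretrained filter fixed
        self.adapter = nn.Conv2d(conv.in_channels, conv.out_channels,
                                 kernel_size=1, stride=conv.stride, bias=False)
        nn.init.zeros_(self.adapter.weight)    # start from the pretrained behaviour

    def forward(self, x):
        return self.conv(x) + self.adapter(x)
```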
In summary, the classifier initialization methods we consider are {Random, Linear probing, WI}, the fine-tuning layer configurations are {Full, Partial, PA, BN}, and the matchers are {cos, EVM, NAC}. We test OSFI performance for different combinations of these three components. Our proposed OSFI scheme uses WI+BN+NAC jointly.

C. Training Details

We choose VGG19 [2] and ResNet-50 [3] as the encoders, with feature dimension 512. We pretrain these encoders on the VGGFace2 dataset with the CosFace loss (scale=32, margin=0.4) until convergence. We then fine-tune each encoder with the different classifier initialization schemes and encoder layer configurations. When using the linear probing initialization, we train the classifier until the training accuracy reaches 95%. We follow the encoder layer fine-tuning configurations of Sec. IV-B; for partial fine-tuning, we only train the last 2 convolutional layers (Conv-BN-ReLU-Conv-BN-ReLU). Table III shows the number of total and updated parameters for each fine-tuning scheme.
Fig. 5. The OSFI performance of cosine similarity and NAC with different values of k on the IJB-C dataset, using VGGNet-19 (left) and ResNet-50 (mid) as the encoder. The square markers refer to cosine similarity and the star marks the optimal k for each layer fine-tuning method. To summarize OSFI performance in a single number, we use the area under the curve (AUC, %) of the DIR@FAR curve. (Right) DIR@FAR curves of the Pretrained and BN configurations using cosine similarity and NAC (k=16) as the matcher; numbers in the legend show the AUC values. When k = 1, NAC is replaced by cos.

We fix the number of epochs to 20 and the batch size to 128 for every method. We again use the CosFace loss for consistency. For the optimizer, we use Adam [32] with cosine annealing. The initial learning rate is set to 1e-4 for full and PA, and 1e-3 for partial and BN, which we find to be the optimal learning rate for each method.
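A small sketch of the fine-tuning optimizer setup just described (Adam with cosine annealing and per-configuration learning rates); `trainable_params` would come from the chosen layer configuration, and the helper name is illustrative.

```python
import torch

def make_optimizer(trainable_params, layer_config, epochs=20):
    lr = 1e-4 if layer_config in ("full", "pa") else 1e-3   # "partial" and "bn" use 1e-3
    optimizer = torch.optim.Adam(trainable_params, lr=lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
    return optimizer, scheduler
```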
For data augmentation, we use random horizontal flipping and random cropping with a random scale from 0.7 to 1.0; the cropped images are resized back to the original size.

D. Optimal k for NAC

Since the gallery set is very small, we cannot afford a separate validation set to optimize k individually for each dataset. Instead, we attempt to find a global value with optimal performance regardless of the fine-tuning method, if one exists. We first fine-tune the encoders with the different layer configurations, which gives us five different encoders including one without any fine-tuning: pretrained, full, partial, PA, and BN. We then search for the best parameter k for the NAC matcher by grid search, where the grid is [2, 4, 8, 16, 32, 128, 256, 512, 1024, C] and C is the total number of identities. Note that k = 1 refers to using cosine similarity instead of NAC, which we add for comparison. Since a single-value objective is preferred, we use the area under the curve (AUC) of the DIR@FAR curve instead of DIR values at individual FARs. We repeat this process for the different datasets and encoder architectures. The results are shown in Fig. 5; we do not include the results on CASIA-WebFace as they show a similar trend.
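The grid search could look like the following sketch, where `evaluate_dir_far(k, far)` is a placeholder for running the NAC matcher with neighborhood size k and measuring DIR at that FAR; both names are hypothetical.

```python
import numpy as np

def auc_of_dir_far(far_grid, dir_values):
    """Trapezoidal area under the DIR@FAR curve."""
    far = np.asarray(far_grid, dtype=float)
    dirs = np.asarray(dir_values, dtype=float)
    return float(np.sum(np.diff(far) * (dirs[1:] + dirs[:-1]) / 2.0))

def search_best_k(evaluate_dir_far, k_grid, far_grid):
    best_k, best_auc = None, -1.0
    for k in k_grid:
        auc = auc_of_dir_far(far_grid, [evaluate_dir_far(k, far) for far in far_grid])
        if auc > best_auc:
            best_k, best_auc = k, auc
    return best_k, best_auc
```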
Excluding k = 1, which is not NAC, the results show a smooth unimodal curve with a peak at k = 16 or 32. This shows that the NAC matcher indeed has a globally optimal k that is robust across datasets, encoders, and fine-tuning methods. We therefore choose k = 16 (k = 32 gives similar results) as the global parameter throughout this paper. Note that when k = C, NAC becomes equivalent to a softmax over cosine similarity logits; this is notably inferior to k = 16, which implies that considering only the k nearest prototypes is superior to considering every gallery prototype.

E. Comparison of Fine-Tuning Methods

We compare the OSFI performance of the pretrained model (not fine-tuned) with six different combinations of classifier initialization schemes and layer fine-tuning configurations: random+full, linear probing+full, WI+full, WI+partial, WI+PA, and WI+BN. The matcher is fixed to cosine similarity. These correspond to rows 4-9 of Table IV. First, to compare the classifier initialization schemes, we fix the fine-tuning scheme to full. With random initialization, both the rejection accuracy (DIR@FAR=0.001, 0.01, 0.1) and the closed-set accuracy (DIR@FAR=1) drop severely. With linear probing, the rejection accuracy improves while the closed-set accuracy drops. Only WI clearly improves the encoder performance, supporting the superiority of weight imprinting.
Now we fix the classifier initialization to WI and compare the layer fine-tuning configurations. full clearly has the worst performance. While PA is better than partial in closed-set accuracy, partial clearly outperforms PA in rejection accuracy. BN outperforms all others in closed-set accuracy by a large margin but sometimes falls behind partial in rejection accuracy. With the aid of the NAC matcher, our method WI+BN+NAC outperforms all other methods in every aspect. Compared to the original model, it gains on average 4.60%, 8.11%, 4.57%, and 1.68% higher DIR at FARs of 0.001, 0.01, 0.1, and 1.0, respectively.

F. Analysis on the Discriminative Quality of Different Fine-Tuning Methods

How do the different layer fine-tuning configurations affect the final OSFI performance? To analyze this, we adopt three metrics: inter-class separation, intra-class variance, and the Davies-Bouldin Index (DBI) [33]. The definitions of the first two metrics are identical to those of Fig. 2. DBI is a metric for evaluating clustering quality, where a DBI close to 0 means perfect clustering.
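A sketch of how these three metrics could be computed on the gallery features; the angular formulation of intra-class variance and inter-class separation is our assumption based on Fig. 2, and the DBI comes from scikit-learn.

```python
import numpy as np
from sklearn.metrics import davies_bouldin_score

def discriminative_quality(features, labels):
    """Mean angle of features to their class centroid (intra-class variance),
    mean pairwise angle between class centroids (inter-class separation), and DBI."""
    feats = features / np.linalg.norm(features, axis=1, keepdims=True)
    classes = np.unique(labels)
    centroids = np.stack([feats[labels == c].mean(axis=0) for c in classes])
    centroids /= np.linalg.norm(centroids, axis=1, keepdims=True)
    intra = float(np.mean([np.degrees(np.arccos(np.clip(
        feats[labels == c] @ centroids[i], -1.0, 1.0))).mean()
        for i, c in enumerate(classes)]))
    upper = np.triu_indices(len(classes), k=1)
    inter = float(np.degrees(np.arccos(np.clip(
        (centroids @ centroids.T)[upper], -1.0, 1.0))).mean())
    return intra, inter, davies_bouldin_score(feats, labels)
```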
We compute these metrics on the gallery features after fine-tuning, and the results are shown in Table V. Here we can easily separate the configurations into two groups: full and partial versus PA and BN. The first group has

[Fig. 5 plots: legend entries Pretrained, Full, Partial, PA, BN with square (cosine) and star (best NAC) markers; AUC (%) versus k for VGG19 and ResNet-50; right panel DIR versus False Alarm Rate with legend AUC values Pretrained+cos 72.36%, Pretrained+NAC 73.42%, WI+BN+cos 75.02%, WI+BN+NAC 75.41%.]

TABLE IV
DIR@FAR of different methods on the CASIA-WebFace and IJB-C datasets, using VGGNet-19 and ResNet-50 as the encoder. DIR@FAR=1 (100%) is the closed-set accuracy. The highest value in each column is marked in bold. For the first three rows the encoder is not fine-tuned and only the matchers are changed. The last row (WI+BN+NAC) is our proposed method.

    Encoder | Classifier init. | Fine-tuning layers | Matcher | CASIA-WebFace DIR@FAR (%) 0.1 / 1.0 / 10.0 / 100.0 | IJB-C DIR@FAR (%) 0.1 / 1.0 / 10.0 / 100.0
    VGG19 | None           | None    | cos | 25.23 / 52.97 / 70.07 / 80.89 | 28.35 / 45.55 / 61.71 / 73.80
    VGG19 | None           | None    | EVM | 37.57 / 57.75 / 71.03 / 80.78 | 35.03 / 53.64 / 63.34 / 73.70
    VGG19 | None           | None    | NAC | 25.15 / 55.68 / 71.41 / 80.89 | 36.73 / 51.92 / 64.27 / 73.80
    VGG19 | Random         | Full    | cos | 23.95 / 43.19 / 59.03 / 70.94 | 17.18 / 32.62 / 46.90 / 60.23
    VGG19 | Linear probing | Full    | cos | 28.82 / 55.64 / 70.44 / 79.84 | 30.80 / 45.91 / 59.63 / 70.09
    VGG19 | WI             | Full    | cos | 27.63 / 57.58 / 72.02 / 80.94 | 35.49 / 50.52 / 63.56 / 73.53
    VGG19 | WI             | Partial | cos | 28.91 / 57.31 / 72.29 / 81.16 | 34.81 / 51.98 / 64.53 / 73.89
    VGG19 | WI             | PA      | cos | 26.29 / 57.90 / 72.82 / 81.82 | 31.74 / 50.21 / 64.26 / 74.50
    VGG19 | WI             | BN      | cos | 25.39 / 56.65 / 72.54 / 82.14 | 32.19 / 48.74 / 63.87 / 74.43
    VGG19 | WI             | BN      | NAC | 25.94 / 58.01 / 72.92 / 82.14 | 38.09 / 53.08 / 65.30 / 74.43
    Res50 | None           | None    | cos | 23.85 / 58.06 / 74.15 / 83.69 | 32.11 / 48.05 / 65.31 / 76.96
    Res50 | None           | None    | EVM | 39.44 / 61.61 / 75.02 / 83.57 | 38.12 / 38.12 / 66.81 / 76.96
    Res50 | None           | None    | NAC | 21.24 / 60.23 / 75.31 / 83.69 | 36.67 / 54.53 / 68.14 / 76.96
    Res50 | Random         | Full    | cos | 25.31 / 45.43 / 60.80 / 72.44 | 14.88 / 32.05 / 49.39 / 61.88
    Res50 | Linear probing | Full    | cos | 28.35 / 60.11 / 74.63 / 82.73 | 30.35 / 46.42 / 61.90 / 72.34
    Res50 | WI             | Full    | cos | 26.73 / 63.92 / 77.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='49 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='65 39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='05 56.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='00 67.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='83 76.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='94 WI Partial cos 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='98 64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='66 78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='07 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='02 44.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='31 57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='11 69.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='13 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='49 WI PA cos 24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='89 63.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='85 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='58 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='01 36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='69 54.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='86 68.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='30 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='63 WI BN cos 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='70 65.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='83 79.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='66 86.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='73 40.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='29 55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='71 69.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='29 78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='74 WI BN NAC 23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='65 67.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='72 80.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='34 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='73 40.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='25 58.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='25 70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='40 78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='74 TABLE V INTER-CLASS SEPARATION, INTRA-CLASS VARIANCE, DBI, AND AUC GAIN BY USING NAC (REFER TO FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 5) FOR EACH LAYER FINETUNING CONFIGURATION.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' THESE VALUES ARE AVERAGED ACROSS DATASETS AND ENCODER ARCHITECTURES.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' ↑ MEANS THAT LARGER QUANTITY IS BETTER AND VICE VERSA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Inter (↑) Intra (↓) DBI (↓) ∆AUC (↑) Pretrained Model 106.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='3◦ 34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='5◦ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='52 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='740 Full finetuning 106.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='7◦ 24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='2◦ 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='87 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='025 Partial finetuning 106.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='4◦ 24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='5◦ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='90 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='058 Parallel Adapter 107.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='0◦ 31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='8◦ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='32 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='135 BN-only finetuning 107.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='3◦ 33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='6◦ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='46 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='335 similar inter-class separation with Pretrained and significantly smaller intra-class variance, which leads to small DBI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' This is in stark contrast with the second group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' With this observation, we can conjecture the different opti- mization strategies of each group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' The first group was able to easily reduce the training loss by collapsing the gallery fea- tures into a single direction (shown by the small angle between intra-class features).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' This was possible because both full and partial directly updated the parameters of the convolutional filters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' On the other hand, all convolutional filters were frozen for both PA and BN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' This constraint may have prevented these methods from taking the shortcut, i.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' simply collapsing the gallery features, and instead led to separating the embeddings of different identities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' This explains why PA and BN have higher closed-set accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' This can also explain the AUC gain (∆AUC) when using NAC instead of cosine similarity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Features become redundant when they collapse, and so does the prototype.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Therefore the information from neighboring prototypes becomes less helpful in rejecting unknown samples, leading to the marginal gain from using NAC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' This is why full and partial do not benefit from using NAC matcher.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' The performance of our method against the baseline w.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' different gallery size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' AUC of DIR@FAR curve is used as the performance measure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' Performance with respect to Different Gallery Size Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' 6 shows the OSFI performance of our method against the baseline (pretrained encoder with cos matcher) with respect to different gallery size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQf9f77/content/2301.01922v1.pdf'} +page_content=' We can see that our method consis- tently improves upon the baseline, except for the extreme case where only one image is provided for each identity.' 
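The baseline mentioned above is a pretrained encoder whose gallery prototypes are matched to probe features by cosine similarity, with a probe rejected as unknown when no prototype matches well. The following is a minimal, generic sketch of such an open-set matcher; the feature dimensions, the rejection threshold, and all function names are illustrative assumptions, not the paper's implementation.

# Minimal open-set identification with a cosine-similarity ("cos") matcher.
# Illustrative sketch only: the feature extractor is abstracted away and the
# threshold-based rejection rule is an assumption.
import numpy as np

def l2_normalize(x, axis=-1, eps=1e-12):
    return x / (np.linalg.norm(x, axis=axis, keepdims=True) + eps)

def imprint_prototypes(gallery_feats, gallery_labels, num_ids):
    """Class prototypes as normalized means of normalized gallery features."""
    feats = l2_normalize(gallery_feats)
    protos = np.zeros((num_ids, feats.shape[1]))
    for c in range(num_ids):
        protos[c] = feats[gallery_labels == c].mean(axis=0)
    return l2_normalize(protos)

def cos_match(probe_feats, prototypes, reject_threshold=0.3):
    """Predict an identity per probe, or -1 (unknown) if the best cosine
    similarity falls below the threshold."""
    scores = l2_normalize(probe_feats) @ prototypes.T   # cosine similarities
    best = scores.argmax(axis=1)
    best_score = scores[np.arange(len(best)), best]
    best[best_score < reject_threshold] = -1
    return best, best_score

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    gallery = rng.normal(size=(50, 128))      # e.g. 10 identities x 5 images
    labels = np.repeat(np.arange(10), 5)
    protos = imprint_prototypes(gallery, labels, num_ids=10)
    probes = rng.normal(size=(8, 128))
    pred, score = cos_match(probes, protos)
    print(pred, np.round(score, 3))

The prototype computation here is plain weight imprinting (normalized class means); the NAC matcher discussed in the text additionally exploits information from neighboring prototypes when rejecting unknowns, which this sketch does not reproduce.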
V. CONCLUSION AND FUTURE WORKS
In this work we showed that combining weight-imprinted classifier and BatchNorm-only tuning of the encoder effectively improves the encoder's OSFI performance without suffering from overfitting. We further facilitated the performance by our novel NAC matcher instead of the commonly used cosine similarity. Future works will explore extending this idea to the open-set few-shot recognition of generic images.
Acknowledgements: This work was supported by the National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIP) (NO. NRF-2022R1A2C1010710).
[Figure 6 plot: panels for IJB-C (ResNet-50) and CASIA-WebFace (ResNet-50); AUC (%) versus the number of images per gallery identity, comparing Pretrained and Ours.]
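As a rough illustration of the BatchNorm-only tuning summarized in the conclusion, the PyTorch-style sketch below freezes every encoder parameter except the BatchNorm affine parameters before training. The backbone, learning rate, and optimizer are placeholders and not taken from the paper; in the paper's setup the classifier on top of such an encoder would be initialized by weight imprinting, as in the earlier matcher sketch.

# Sketch of "BatchNorm-only" finetuning: keep all convolutional filters frozen
# and train only the BN scale/shift parameters. Hyperparameters are illustrative.
import torch
import torch.nn as nn
from torchvision.models import resnet18

encoder = resnet18(num_classes=128)   # placeholder standing in for a pretrained face encoder

for p in encoder.parameters():        # freeze everything first
    p.requires_grad = False
for m in encoder.modules():           # re-enable only BN affine parameters
    if isinstance(m, nn.BatchNorm2d):
        if m.weight is not None:
            m.weight.requires_grad = True
        if m.bias is not None:
            m.bias.requires_grad = True

trainable = [p for p in encoder.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(trainable, lr=1e-3)
print(sum(p.numel() for p in trainable), "trainable parameters")

Because the convolutional filters never move, this kind of finetuning cannot take the feature-collapsing shortcut described above, which is consistent with the behavior reported in Table V.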
diff --git a/5NE0T4oBgHgl3EQfegCm/content/2301.02392v1.pdf b/5NE0T4oBgHgl3EQfegCm/content/2301.02392v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d81bf2f1927a25e22ef7b2a2d94264713bef4503 --- /dev/null +++ b/5NE0T4oBgHgl3EQfegCm/content/2301.02392v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc43fb406e9849d9b6f29aaf45e93641a47de3c45ea1b1ba644c43bf730acc6f +size 939303 diff --git a/5NE0T4oBgHgl3EQfegCm/vector_store/index.pkl b/5NE0T4oBgHgl3EQfegCm/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9bf448ddaa980ac6737428e4c6ae49396d829e87 --- /dev/null +++ b/5NE0T4oBgHgl3EQfegCm/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc83b6835b8dfb6f81ccf740510c30388f89cf6ebfa5c1aeff8b7ce86ef296f0 +size 108228 diff --git a/5dAyT4oBgHgl3EQfpfgA/content/2301.00524v1.pdf b/5dAyT4oBgHgl3EQfpfgA/content/2301.00524v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ac86480ab055916cd7432c76b231faf9fa13e2cb --- /dev/null +++ b/5dAyT4oBgHgl3EQfpfgA/content/2301.00524v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93620e003c473cea5db2706ecd35f400c54fbe27d9ed022525a107ebfb74b73c +size 1813327 diff --git a/5dAyT4oBgHgl3EQfpfgA/vector_store/index.faiss b/5dAyT4oBgHgl3EQfpfgA/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..ff12735fc7a69847a4a657ab7ab6e90d8c206bd8 --- /dev/null +++ b/5dAyT4oBgHgl3EQfpfgA/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid
sha256:a35290918cb2d2b484900b1c330611ea66ded7fb8e09d91f077925449da77b13 +size 7209005 diff --git a/5dE4T4oBgHgl3EQf1Q2P/content/2301.05289v1.pdf b/5dE4T4oBgHgl3EQf1Q2P/content/2301.05289v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fc8397e97ba9acb5e57ae4a8a8e27375525f26b8 --- /dev/null +++ b/5dE4T4oBgHgl3EQf1Q2P/content/2301.05289v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aae6f852013d37701d1d412e1822f0c6b1e9bc1dadc7a95038ea5833c9348dcd +size 595340 diff --git a/5dE4T4oBgHgl3EQf1Q2P/vector_store/index.faiss b/5dE4T4oBgHgl3EQf1Q2P/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..2c46d7df075432599a9e503a7deba04b3df8c196 --- /dev/null +++ b/5dE4T4oBgHgl3EQf1Q2P/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebea018128386d3d57fd47ce8ce2c139f98da18a7828e93b5eaaa31e686dc37a +size 7536685 diff --git a/5dE4T4oBgHgl3EQf1Q2P/vector_store/index.pkl b/5dE4T4oBgHgl3EQf1Q2P/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..bc70e704622da4a991386e0990e2ddcc9e687b4f --- /dev/null +++ b/5dE4T4oBgHgl3EQf1Q2P/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f4bbb5ef1f0f3c334cf3e2f8ff0ee2f0a1d24ce2b801ab588ef468ed7259a26 +size 307878 diff --git a/69E1T4oBgHgl3EQf7AXC/content/2301.03530v1.pdf b/69E1T4oBgHgl3EQf7AXC/content/2301.03530v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1a82a6773cd887c2b3e54590131eabe2bac9d463 --- /dev/null +++ b/69E1T4oBgHgl3EQf7AXC/content/2301.03530v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db8eb38153d9c8b0ca334dd09b89ff026f99b30f80e9a9d5d007398395eae496 +size 2478477 diff --git a/69E1T4oBgHgl3EQf7AXC/vector_store/index.faiss b/69E1T4oBgHgl3EQf7AXC/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..5364d82e8193a366f7d1c36816669bc1e5952b12 --- /dev/null +++ b/69E1T4oBgHgl3EQf7AXC/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:268ca0a37dacc220d0401ce156eddd091b39909a4f2d125ea10f2734836bc00e +size 3932205 diff --git a/69E2T4oBgHgl3EQf7ggl/content/tmp_files/2301.04209v1.pdf.txt b/69E2T4oBgHgl3EQf7ggl/content/tmp_files/2301.04209v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..c5c1927e2f89b6c4354101b019fec0d6ac9e3b21 --- /dev/null +++ b/69E2T4oBgHgl3EQf7ggl/content/tmp_files/2301.04209v1.pdf.txt @@ -0,0 +1,1660 @@ +High Dimensional Analysis of Variance in Multivariate Linear +Regression +Zhipeng Lou1, Xianyang Zhang2 and Wei Biao Wu3 +January 12, 2023 +Abstract +In this paper, we develop a systematic theory for high dimensional analysis of variance in multivariate +linear regression, where the dimension and the number of coefficients can both grow with the sample +size. We propose a new U type test statistic to test linear hypotheses and establish a high dimensional +Gaussian approximation result under fairly mild moment assumptions. +Our general framework and +theory can be applied to deal with the classical one-way multivariate ANOVA and the nonparametric +one-way MANOVA in high dimensions. To implement the test procedure in practice, we introduce a +sample-splitting based estimator of the second moment of the error covariance and discuss its properties. +A simulation study shows that our proposed test outperforms some existing tests in various settings. 
+Keywords: Data-splitting; Gaussian approximation; Multivariate analysis of variance; One-way +layout; U statistics +1 +Introduction +In statistical inference of multivariate linear regression, a fundamental problem is to investigate the rela- +tionships between the covariates and the responses. In this article, we aim to test whether a given set of +covariates are associated with the responses by multivariate analysis of variance (MANOVA). To fix the idea, +we build the multivariate linear regression model with p predictors as +Yi = B⊤Xi + Vi (i = 1, . . . , n), +(1.1) +where Yi = (Yi1, . . . , Yid)⊤ and Xi = (Xi1, . . . , Xip)⊤ are respectively the response vector and the predictor +vector respectively for the ith sample, B⊤ = (B1, . . . , Bp) is the unknown coefficient matrix with Bk ∈ Rd +consisting of coefficients on the kth covariate, and the innovation vectors V1, . . . , Vn ∈ Rd are independent +and identically distributed random vectors with E(V1) = 0 and cov(V1) = Σ. The first element of Xi can be +set to be 1 to reflect an intercept term. Equivalently we can write (1.1) in compact matrix form as +Y = XB + V, +(1.2) +1Department of Operations Research and Financial Engineering, Princeton, NJ 08544. +2Department of Statistics, Texas A&M University, College Station, TX 77843. +3Department of Statistics, University of Chicago, Chicago, IL, 60637. +1 +arXiv:2301.04209v1 [stat.ME] 10 Jan 2023 + +where Y = (Y1, . . . , Yn)⊤, X = (X1, . . . , Xn)⊤ and V = (V1, . . . , Vn)⊤. Let C ∈ Rm×p be a matrix of rank m, +where m ∈ {1, . . . , p}. We are interested in testing a collection of linear constraints on the coefficient matrix +H0 : CB = 0 versus H1 : CB ̸= 0. +(1.3) +This testing problem has been extensively studied in the low dimensional setting where both the number +of predictors and the dimension of the response are relatively small compared to the sample size. A natural +and popular choice is the classical likelihood ratio test when the errors are normally distributed; see Chapter +8 in Anderson (2003) for a review of theoretical investigations. In recent years, high dimensional data are +increasingly encountered in various applications. Over the past decade, there have been tremendous efforts +to develop new methodologies and theories for high dimensional regression. The paradigm where d is 1 +or small and p can increase with n has received considerable attention, while on the other hand the one +where d is very large and p is relatively small has been less studied. The model (1.2) in the latter setting +has been applied to a number of research problems involving high-dimensional data types such as DNA +sequence data, gene expression microarray data, and imaging data; see for example Zapala and Schork +(2006), Wessel and Schork (2006) and Zapala and Schork (2012). Those related studies typically generate +huge amounts of data (responses) that, due to their expense and sophistication, are often collected on a +relatively small number of individuals, and investigate how the data can be explained by a certain number +of predictor variables such as the ages of individuals assayed, clinical diagnoses, strain memberships, cell +line types, or genotype information (Zapala and Schork, 2006). 
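To make this setup concrete before turning to existing approaches, the following sketch simulates data from model (1.2) and builds a constraint matrix C encoding the null hypothesis in (1.3). The sample sizes, the Gaussian errors, and the choice of zeroing out the last m coefficient vectors are illustrative assumptions rather than part of the paper; later sketches reuse X, Y and C from this snippet.
```python
import numpy as np

rng = np.random.default_rng(0)

n, p, d, m = 100, 20, 400, 10   # illustrative sizes, not prescribed by the paper

# Design matrix X (n x p) with an intercept in the first column, as allowed by (1.1).
X = np.column_stack([np.ones(n), rng.standard_normal((n, p - 1))])

# Coefficient matrix B (p x d); the last m rows are set to zero so that H0 in (1.3)
# holds for the constraint "the last m coefficient vectors vanish".
B = np.zeros((p, d))
B[: p - m, :] = rng.uniform(1.0, 2.0, size=(p - m, d))

# Error matrix V (n x d); Gaussian here purely for illustration -- the theory does not require it.
V = rng.standard_normal((n, d))

# Responses from the multivariate linear model (1.2): Y = X B + V.
Y = X @ B + V

# Constraint matrix C (m x p) selecting the last m rows of B, so CB = 0 encodes H0.
C = np.zeros((m, p))
C[:, p - m:] = np.eye(m)

assert np.allclose(C @ B, 0.0)   # the null hypothesis holds in this simulated instance
```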
Owing to inappropriateness of applying the +standard MANOVA strategy and shortage of high-dimensional MANOVA theory, biological researchers often +considered some form of data reduction such as cluster analysis and factor analysis, which can suffer from +many problems, as pointed out by Zapala and Schork (2012). In the works Zapala and Schork (2006, 2012), +the authors incorporated a distance matrix to modify the standard MANOVA, but they commented that +there is very little published material that can be used to guide a researcher as to which distance measure is +the most appropriate for a given situation. Motivated by these real-world applications, we aim to develop a +general methodology for high dimensional MANOVA and lay a theoretical foundation for assessing statistical +significance. +The testing problem (1.3) for model (1.2) is closely related to a group of high dimensional hypothesis +tests. Two-sample mean test, for testing H0 : µ1 = µ2 where µ1 ∈ Rd and µ2 ∈ Rd are mean vectors of two +different populations, is a special case with p = 2, B = (µ1, µ2)⊤ and C = (1, −1). There is a large literature +accommodating the Hotelling T 2 type statistic into the high-dimensional situation where d is large; see for +example, Bai and Saranadasa (1996), Chen and Qin (2010), Srivastava et al. (2013) among many others. +It can be generalized to test the equality of multiple mean vectors in high dimensions. Some notable work +includes Schott (2007), Cai and Xia (2014), Hu et al. (2017), Li et al. (2017), Zhang et al. (2017) and Zhou +et al. (2017). In most existing work, the random samples were assumed to be Gaussian or follow some linear +structure as that of Bai and Saranadasa (1996). In contrast, the testing problem we are concerned is much +more general. For one thing, all the aforementioned high dimensional mean test problems can be fitted into +our framework, apart from which, we can deal with the more general multivariate linear regression in the +presence of an increasing number of predictor variables. For another, we do not assume the Gaussianity or +any particular structure of the error vectors {Vi}n +i=1. +Throughout the paper, we assume that p < n and the design matrix X is of full column rank such that +2 + +X⊤X is invertible. The conventional MANOVA test statistic for (1.3) is given by +Qn = |PY |2 +F = +n +� +i=1 +n +� +j=1 +PijY ⊤ +i Yj, +(1.4) +where | · |F stands for the Frobenius norm and +P = X(X⊤X)−1C⊤{C(X⊤X)−1C⊤}−1C(X⊤X)−1X⊤ = (Pij)n×n +is the orthogonal projection matrix onto the column space of the matrix X(X⊤X)−1C⊤. We shall reject the +null hypothesis H0 if Qn is larger than some critical value. In the univariate case where d = 1, the asymptotic +behavior of Qn has been extensively studied in literature; see G¨otze and Tikhomirov (1999) and G¨otze and +Tikhomirov (2002) for detailed discussions. The validity to perform a test for (1.3) using Qn when d is large +has been open for a long time. The first goal of the paper is to provide a solution to this open problem by +rigorously establishing a distributional approximation of the traditional MANOVA test statistic when d is +allowed to grow with n. Our key tool is the Gaussian approximation for degenerate U type statistics: under +fairly mild moment conditions, quadratic functionals of non-Gaussian random vectors can be approximated +by those of Gaussian vectors with the same covariance structure. 
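Continuing the simulated setup, the projection matrix P and the classical statistic Qn in (1.4) can be evaluated directly from X, Y and C. The sketch below is a naive implementation meant only to fix ideas; the function names are ours.
```python
import numpy as np

def manova_projection(X: np.ndarray, C: np.ndarray) -> np.ndarray:
    """Orthogonal projection onto the column space of X (X'X)^{-1} C',
    i.e. the matrix P appearing in (1.4)."""
    XtX_inv = np.linalg.inv(X.T @ X)
    A = X @ XtX_inv @ C.T                       # n x m
    middle = np.linalg.inv(C @ XtX_inv @ C.T)   # {C (X'X)^{-1} C'}^{-1}
    return A @ middle @ A.T                     # n x n, symmetric and idempotent

def conventional_Qn(X: np.ndarray, Y: np.ndarray, C: np.ndarray) -> float:
    """Classical MANOVA statistic Qn = |P Y|_F^2 from (1.4)."""
    P = manova_projection(X, C)
    return float(np.sum((P @ Y) ** 2))

# Example usage with the X, Y, C simulated earlier:
# Qn = conventional_Qn(X, Y, C)
```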
It is worth mentioning that Chen (2018) +established a Gaussian approximation result for high dimensional non-degenerate U statistics by Stein’s +method, which can not be applied to the degenerate case here. From a technical point of view, we employ +completely different arguments to bound distance between the distribution functions of the test statistic and +its Gaussian analogue. +The main contributions of this paper are three-fold. Firstly, we develop a systematic theory for the +conventional MANOVA test statistic Qn in the high dimensional setting. More specifically, we shall establish +a dichotomy result: Qn can be approximated either by a linear combination of independent chi-squared +random variables or by a normal distribution under different conditions; see Theorem 2.1. While this reveals +the interesting theoretical properties of the test statistics, it causes difficulties in applications as one may +not know which asymptotic distribution to use in practice. To overcome this difficulty, as the second main +contribution of our paper, we propose using a new U type test statistic. Using the modified test statistic, +such a dichotomy does not appear; see Theorem 2.5 for the asymptotic result. Thirdly, we will propose a +new estimator for the second spectral moment of the covariance matrix via a data-splitting technique. To +the best of our knowledge, it is the first work concerning an unbiased and ratio consistent estimator in the +multivariate linear regression model. +We now introduce some notation. Let I{·} denote the indicator function. For random variables X ∈ R +and Y ∈ R, the Kolmogorov distance is defined by ρ(X, Y ) = supz∈R |P(X ≤ z) − P(Y ≤ z)|. For q > 0, +we write ∥X∥q = (E|X|q)1/q if E|X|q < ∞. +For two matrices A = (aij)i≤I,j≤J and B = (bij)i≤I,j≤J, +A ◦ B = (aijbij)i≤I,j≤J denotes their Hardmard product. For any positive integer m, we use Im to denote +m × m identity matrix. For two sequences of positive numbers (an) and (bn), we write an ≲ bn if there +exists some constant C such that an ≤ Cbn for all large n. We use C, C1, C2, . . . to denote positive constants +whose value may vary at different places. +3 + +2 +Theoretical results +We start with some notational definitions and basic assumptions. Let λ1(Σ) ≥ . . . ≥ λd(Σ) ≥ 0 denote the +eigenvalues of Σ = cov(V1) and let ς = |Σ|F = {�d +k=1 λ2 +k(Σ)}1/2. For q ≥ 2, we define +Mq = E +���� +V ⊤ +1 V2 +ς +���� +q +and Lq = E +���� +V ⊤ +1 ΣV1 +ς2 +���� +q/2 +. +(2.1) +Assumption 2.1. Recall that P11, . . . , Pnn are diagonal elements of the matrix P. Assume that +1 +m +n +� +i=1 +P 2 +ii → 0 as n → ∞. +Remark 1. Assumption 2.1 is quite natural and mild for testing (1.3). For instance, it automatically holds +for one sample test of mean vector as m−1 �n +i=1 P 2 +ii = 1/n. Additionally, in the context of K-sample test, as +discussed in Section 3.1, Assumption 2.1 is satisfied as long as the minimum sample size goes to infinity. More +generally, since �n +i=1 Pii = m, a simple sufficient condition for Assumption 2.1 would be max1≤i≤n Pii → 0. +Further discussions on this condition will be given in Remark 6 and Example 2.1. +2.1 +Asymptotic distribution of the conventional MANOVA test statistics +Under the null hypothesis CB = 0, PXB = X(X⊤X)−1C⊤{C(X⊤X)−1C⊤}−1CB = 0 and hence Qn = +|PXB + PV |2 +F +H0 += |PV |2 +F, which can be further decomposed as +Qn +H0 += +n +� +i=1 +n +� +j=1 +PijV ⊤ +i Vj = +n +� +i=1 +PiiV ⊤ +i Vi + +n +� +i=1 +� +j̸=i +PijV ⊤ +i Vj =: Dn + Q⋆ +n. 
+(2.2) +Observe that Dn is a weighted sum of i.i.d. random variables and Q⋆ +n is a second order non-degenerate U - +statistic of high dimensional random vectors. These two terms can be differently distributed under the high +dimensional setting. More specifically, since Dn and Q⋆ +n are uncorrelated, we have var(Qn) = var(Dn) + +var(Q⋆ +n), where +var(Dn) = +n +� +i=1 +P 2 +ii∥E0(V ⊤ +1 V1)∥2 +2 and var(Q⋆ +n) = 2 +� +m − +n +� +i=1 +P 2 +ii +� +ς2, +where E0(V ⊤ +1 V1) = V ⊤ +1 V1 − E(V ⊤ +1 V1). When the dimension d increases with the sample size n, the mag- +nitudes of var(Dn) and var(Q⋆ +n) can be quite different for non-Gaussian {Vi}n +i=1; cf. Example 4.1. As a +consequence, Qn can exhibit different asymptotic null distributions. More precisely, to asymptotically quan- +tify the discrepancy between var(Dn) and var(Q⋆ +n), under Assumption 2.1, we define +Λ2 = +�n +i=1 P 2 +ii∥E0(V ⊤ +1 V1)∥2 +2 +mς2 +. +Before presenting the distributional theory for Qn, we first define its Gaussian analogue. Let Z1, . . . , Zn be +i.i.d. N(0, Σ) Gaussian random vectors and write Z = (Z1, . . . , Zn)⊤. Then the Gaussian analogue of Qn is +defined as the same quadratic functional of {Zi}n +i=1, +Gn = |PZ|2 +F = +n +� +i=1 +n +� +j=1 +PijZ⊤ +i Zj. +(2.3) +4 + +Theorem 2.1. Let q = 2 + δ, where 0 < δ ≤ 1. Suppose Assumption 2.1 holds and +∆q = +�n +i=1 +� +j̸=i |Pij|q +mq/2 +Mq + +�n +i=1 P q/2 +ii +mq/2 +Lq → 0. +(2.4) +1. Assume Λ → 0. Then, under (2.4) and the null hypothesis, we have +ρ(Qn, Gn) ≤ C1Λ2/5 + Cq∆1/(2q+1) +q ++ C2 +� +1 +m +n +� +i=1 +P 2 +ii +�1/5 +→ 0. +2. Assume Λ → ∞ and the Lindeberg condition holds for Wi = E0(PiiV ⊤ +i Vi)/(Λς√m), that is, �n +i=1 E(W 2 +i I{|Wi| > +ϵ}) → 0 for any ϵ > 0. Then, under the null hypothesis, we have +Qn − mtr(Σ) +Λς√m +⇒ N(0, 1). +(2.5) +Remark 2. Theorem 2.1 illustrates an interesting dichotomy: the conventional MANOVA test statistic +Qn can have one of the two different asymptotic null distributions, depending on the magnitude of the +unknown quantity Λ. +This nature of dichotomy poses extra difficulty for utilizing Qn to test (1.3) in +practical implementation as we need to predetermine which asymptotic distribution to use. Any subjective +choice may lead to unreliable conclusion. +To illustrate this, suppose now Λ → 0. +For α ∈ (0, 1), let +G−1 +n (α) denote the (1 − α)th quantile of Gn. Based on Theorem 2.1, an α level test for (1.3) is given by +Φ0 = I{Qn > G−1 +n (α)}. However, if one implements Φ0 under the case where Λ → ∞, then the type I error +of Φ0 satisfies that P(Φ0 = 1 | H0) → 1/2, which implies that Φ0 in this scenario (Λ → ∞) is no better than +random guessing. +Remark 3. Recently much attention has been paid to studying the dichotomy and similar phase transition +phenomenon of the asymptotic distribution of classical tests under the high dimensional setting. For instance, +Xu et al. (2019) studied the Pearson’s chi-squared test under the scenario where the number of cells can +increase with the sample size and demonstrated that the corresponding asymptotic distribution can be either +chi-squared or normal. He et al. (2021) derived the phase transition boundaries of several standard likelihood +ratio tests on multivariate mean and covariance structures of Gaussian random vectors. In addition to these +tests, we suspect similar phenomenon can occur for many other traditional tests as the dimension increases +with the sample size. 
More importantly, as in our paper, investigating these phase transition phenomena +of classical tests not only contributes to the theoretical development but also motivates us to propose new +test procedure or more advanced approximation distributional theory which are suitable under the high +dimensional scenario. +The following lemma establishes an upper bound for ∆q. +Lemma 2.2. Assuming that Mq < ∞, then we have +∆q < 2 +� 1 +m max +1≤i≤n Pii +�δ/2 +Mq. +Remark 4. Condition (2.4) can be viewed as the Lyapunov-type condition for high dimensional Gaussian +approximation of Qn. It is quite natural and does not impose any explicit restriction on the relation between +5 + +the dimension d and the sample size n directly. In particular, (2.4) can be dimension free for some commonly +used models, namely, (2.4) holds for arbitrary dimension d ≥ 1 as long as n → ∞. For instance, suppose +that {Vi}n +i=1 follow the linear process model +Vi = Aξi (i = 1, . . . , n), +(2.6) +where A is a d × L matrix for some integer L ≥ 1, ξi = (ξi1, . . . , ξiL)⊤ and {ξiℓ}i,ℓ∈N are independent zero- +mean random variables with uniformly bounded qth moment E|ξiℓ|q ≤ C < ∞. Applying the Burkholder +inequality leads to Mq ≤ (1 + δ)q max1≤ℓ≤L ∥ξiℓ∥2q +q . +Consequently, Lemma 2.2 reveals that a sufficient +condition for ∆q → 0 is +1 +m max +1≤i≤n Pii → 0. +(2.7) +It is worth mentioning that (2.7) depends only on the projection matrix P and does not impose any re- +striction on the dimension d. Moreover, under Assumption 2.1, (2.7) is automatically satisfied in view of +max1≤i≤n(Pii/m)2 ≤ m−2 �n +i=1 P 2 +ii → 0. +2.2 +Modified U type test statistics +The dichotomous nature of the asymptotic null distribution makes Qn unsuitable for testing (1.3) in the high +dimensional setting. This motivates us to propose a modified U type test statistic of Qn for which such a +dichotomy does not occur. To fix the idea, let B0 ∈ Rp×d denote the coefficient matrix of model (1.2) under +the null hypothesis such that CB0 = 0 and Y +H0 += XB0 + V . Motivated by Theorem 2.1, a natural candidate +of the test statistic Qn would be +Qn,0 = Qn − +n +� +k=1 +Pkk(Yk − B⊤ +0 Xk)⊤(Yk − B⊤ +0 Xk), +(2.8) +which coincides with Q⋆ +n in (2.2) under the null hypothesis. However, B0 is unknown in practice and hence +Qn,0 is infeasible. The primary goal of this section is to propose a consistent empirical approximation Un for +Qn,0. In particular, motivated by the discussions in Section 2.1, the modified test statistic Un should satisfy +that +Un +H0 += +n +� +i=1 +� +j̸=i +KijV ⊤ +i Vj and +Un − Qn,0 +√var(Qn,0) +H0 += oP(1), +for some symmetric matrix K = (Kij)n×n. The latter ensures that Un is asymptotically equivalent to Qn,0 +in (2.8). Towards this end, let �B0 be the least square estimator of B under the constraint CB = 0. Then +Y − X �B0 = (In − P0)Y , where P0 = X(X⊤X)−1X⊤ − P is the projection matrix of model (1.2) under the +null hypothesis. In view of (2.8), the modified U type test statistic is then defined by +Un = Qn − +n +� +k=1 +θk(Yk − �B⊤ +0 Xk)⊤(Yk − �B⊤ +0 Xk) +H0 += +n +� +i=1 +� +Pii − +n +� +k=1 +θk ¯P 2 +ik,0 +� +V ⊤ +i Vi + +n +� +i=1 +� +j̸=i +� +Pij − +n +� +k=1 +θk ¯Pik,0 ¯Pjk,0 +� +V ⊤ +i Vj += +n +� +i=1 +� +j̸=i +� +Pij − +n +� +k=1 +θk ¯Pik,0 ¯Pjk,0 +� +V ⊤ +i Vj, +(2.9) +6 + +where ¯P0 = In − P0 = ( ¯Pij,0)n×n and the last equality follows by taking θ1, . . . , θn to be the solutions of the +following linear equations +n +� +k=1 +¯P 2 +ik,0θk = Pii (i = 1, . . . , n). 
+(2.10) +It is worth mentioning that typically θk in (2.9) are not Pkk, as one would naturally like to use in view +of (2.8). We can view (2.10) as a detailed balanced condition as it removes the diagonals in (2.9). Denote +θ = (θ1, . . . , θn)⊤ and rewrite (2.10) in the more compact matrix form +( ¯P0 ◦ ¯P0)θ = (P11, . . . , Pnn)⊤. +(2.11) +Let Pθ = P − ¯P0Dθ ¯P0 = (Pij,θ)n×n, where Dθ = diag(θ1, . . . , θn) is a diagonal matrix. Then Pii,θ = 0 for +all i = 1, . . . , n in view of (2.11) and +Un +H0 += tr(V ⊤PθV ) = +n +� +i=1 +� +j̸=i +Pij,θV ⊤ +i Vj. +Before proceeding, we first introduce a sufficient condition such that Un exists and is well defined. +Lemma 2.3. Assume that there exists a positive constant ϖ0 < 1/2 such that +max +1≤i≤n Pii,0 ≤ ϖ0. +(2.12) +Then the matrix ¯P0◦ ¯P0 is strictly diagonally dominant and |Pθ|2 +F = m−�n +i=1 θiPii. Moreover, if max1≤i≤n Pii ≤ +ϖ1ζ for some positive constant ϖ1 < 1/2, where ζ = (1 − 2ϖ0)(1 − ϖ0), then we have max1≤i≤n |θi| ≤ ϖ1 < +1/2. +Remark 5. Condition (2.12) ensures the matrix ¯P0 ◦ ¯P0 is invertible. Consequently the solution θ of (2.11) +exists and is unique. It is worth noting that θ is independent of the dimension d and only depends on the +projection matrices P and P0. Moreover, as shown in the proof of Lemma 2.3, +n +� +i=1 +θiPii ≤ 1 +ζ +n +� +i=1 +P 2 +ii and +max +1≤i≤n |θi| ≤ 1 +ζ max +1≤i≤n Pii, +which are essential to upper bound the quantity ∆q,θ in Lemma 2.6 below. Consequently, under Assump- +tion 2.1, suppose �n +i=1 P 2 +ii ≤ mζ/2 for sufficiently large n, we obtain +var(Un) = 2|Pθ|2 +Fς2 = 2 +� +m − +n +� +i=1 +θiPii +� +ς2 > mς2, +which ensures the proposed test statistic Un is non-degenerate and well defined. +Remark 6. Since col(X(X⊤X)−1C⊤) ⊂ col(X), where col(·) denotes the column space, P0 = X(X⊤X)−1X⊤− +P defined above is also a projection matrix. +Hence max{Pii, Pii,0} ≤ X⊤ +i (X⊤X)−1Xi uniformly for +i ∈ {1, . . . , n} and a sufficient condition for Lemma 2.3 would be +max +1≤i≤n X⊤ +i (X⊤X)−1Xi ≤ min{ϖ0, (1 − 2ϖ0)(1 − ϖ0)ϖ1}, +(2.13) +7 + +which is fairly mild on the design matrix X. +More specifically, it is commonly assumed (Huber, 1973, +Portnoy, 1985, Wu, 1986, Shao and Wu, 1987, Shao, 1988, Mammen, 1989, Navidi, 1989, Lahiri, 1992) +for the linear regression model that max1≤i≤n X⊤ +i (X⊤X)−1Xi → 0, which ensures a kind of “robustness of +design” (Huber, 1973). It also implies Assumption 2.1 in view of Remark 1 and can be viewed as a imbalance +measure of model (1.2) (Shao and Wu, 1987). +Example 2.1. Suppose X1, . . . , Xn are independent Gaussian random vectors N(0, Γ), where the covariance +matrix Γ ∈ Rp×p has minimal eigenvalue λmin(Γ) > 0. Then, with probability at least 1−2 exp(−n/2)−n−1, +we have +max +1≤i≤n X⊤ +i (X⊤X)−1Xi ≤ 9p + 18√2p log n + 36 log n +n +. +(2.14) +Consequently, condition (2.13) holds with high probability as long as p/n is sufficiently small. +Proposition 2.4. Under the conditions of Lemma 2.3, we have E(Un) ≥ 0. In particular, +E(Un) = 0 if and only if CB = 0. +2.3 +Asymptotic distribution of the modified test statistics +The primary goal of this section is to establish a Gaussian approximation for the modified test statistic Un. +Following (2.3), the Gaussian analogue of Un is defined by +Gn = tr(Z⊤PθZ) = +n +� +i=1 +� +j̸=i +Pij,θZ⊤ +i Zj. +The following theorem establishes a non-asymptotic upper bound of the Kolmogorov distance between the +distribution functions of Un and its Gaussian analogue Gn. 
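Before the formal statement, the construction (2.9)–(2.11) admits a short numerical sketch: solve the strictly diagonally dominant system (2.11) for θ, form Pθ = P − P̄0DθP̄0, and evaluate Un = tr(Y⊤PθY). The code below reuses the simulated X, Y, C from the earlier snippets and is an illustration rather than an optimized implementation.
```python
import numpy as np

def modified_Un(X: np.ndarray, Y: np.ndarray, C: np.ndarray):
    """Modified U type statistic Un from (2.9), together with |P_theta|_F,
    which is needed for the standardization in (2.15)."""
    n = X.shape[0]
    XtX_inv = np.linalg.inv(X.T @ X)
    H = X @ XtX_inv @ X.T                            # hat matrix of the full model
    A = X @ XtX_inv @ C.T
    P = A @ np.linalg.inv(C @ XtX_inv @ C.T) @ A.T   # projection P from (1.4)
    P0 = H - P                                       # projection under the null model
    P0_bar = np.eye(n) - P0                          # \bar P_0 = I_n - P_0

    # Solve (2.11): (P0_bar o P0_bar) theta = diag(P); under (2.12) this matrix
    # is strictly diagonally dominant, so the system is well posed.
    theta = np.linalg.solve(P0_bar * P0_bar, np.diag(P))

    # P_theta = P - P0_bar D_theta P0_bar has zero diagonal by construction.
    P_theta = P - P0_bar @ np.diag(theta) @ P0_bar

    # Un = Qn - sum_k theta_k |Y_k - B0_hat' X_k|^2 = tr(Y' P_theta Y).
    Un = float(np.sum(Y * (P_theta @ Y)))
    return Un, float(np.linalg.norm(P_theta, "fro"))
```
We now state the bound.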
Compared with Theorem 2.1, it reveals that +the modification of the test statistic Qn in (2.9) removes the dichotomous nature of its asymptotic null +distribution. +Theorem 2.5. Let q = 2 + δ, where 0 < δ ≤ 1. Assume that (2.12) holds and that +∆q,θ = +�n +i=1 +� +j̸=i |Pij,θ|q +mq/2 +Mq + +�n +i=1(� +j̸=i P 2 +ij,θ)q/2 +mq/2 +Lq → 0. +Then, under Assumptions 2.1 and the null hypothesis, we have +ρ(Un, Gn) ≤ Cq∆1/(2q+1) +q,θ ++ C +� +1 +m +n +� +i=1 +P 2 +ii +�1/5 +→ 0. +Similar to Lemma 2.2, we establish a similar upper bound for ∆q,θ in the following lemma. +Lemma 2.6. Under condition (2.12), we have +∆q,θ ≲ +� 1 +m max +1≤i≤n Pii +�δ/2 +Mq. +8 + +For α ∈ (0, 1), Proposition 2.4 and Theorem 2.5 motivate an α level test for (1.3) as follows, +Φθ = I +� +Un +ς|Pθ|F +√2 > c1−α +� +, +(2.15) +where c1−α is the (1 − α)th quantile of the standardized Gn/√var(Gn). +Remark 7. It is worth mentioning that the approximating distribution Gn may or may not be asymptotically +normal. +Let λ1(Pθ), . . . , λn(Pθ) denote the eigenvalues of the symmetric matrix Pθ. +Being a quadratic +functional of Gaussian random vectors {Zi}n +i=1, Gn is distributed as a linear combination of independent +chi-squared random variables, +Gn +D= +d +� +k=1 +n +� +i=1 +λk(Σ)λi(Pθ)ηik(1) = +d +� +k=1 +n +� +i=1 +λk(Σ)λi(Pθ){ηik(1) − 1}, +where {ηik(1)}i,k∈N are independent χ2 +1 random variables and the last equality follows from the fact that +�n +i=1 λi(Pθ) = �n +i=1 Pii,θ = 0. More specifically, the Lindeberg-Feller central limit theorem and Lemma 2.3 +imply that Gn/√var(Gn) ⇒ N(0, 1) if and only if +λ1(Σ) +ς√m → 0. +(2.16) +Consequently, c1−α in (2.15) is asymptotically equal to the standard normal quantiles whenever (2.16) holds. +When m → ∞, condition (2.16) automatically holds for arbitrary dimension d ≥ 1 as λ1(Σ) ≤ ς. +Otherwise, (2.16) is equivalent to tr(Σ4)/ς4 → 0, which is a common assumption to ensure the asymptotic +normality of high dimensional quadratic statistics; see, for example, Bai and Saranadasa (1996), Chen and +Qin (2010), Cai and Ma (2013), Yao et al. (2018) and Zhang et al. (2018) among others. In particular, it +reveals that the asymptotic null distribution of Un can be non-normal if (2.16) is violated. For example, let +Y1, . . . , Yn ∈ Rd be i.i.d. random vectors with mean vector µY = E(Y1) and consider testing whether µY = 0. +Assume that Σ = cov(Y1) = (Σjk)d×d has entries Σjk = ϑ + (1 − ϑ)I{j = k} for some constant ϑ ∈ (0, 1). +Then λ1(Σ)/(ς√m) → 1 and it follows from Theorem 2.5 that +Un +√var(Un) = +�n +i=1 +� +j̸=i Y ⊤ +i Yj +ς√{2n(n − 1)} +⇒ χ2 +1 − 1 +√2 +. +The simulation study in Section 5 shows that our Gaussian multiplier bootstrap approach have a satisfactory +performance regardless of whether Un is asymptotically normal or not. +3 +Applications +As mentioned in the introduction, our paradigm (1.3) is fairly general and it can be applied to many +commonly studied hypothesis testing problems. In this section, we consider two specific examples to illustrate +the usefulness of the proposed U type test statistic and the corresponding asymptotic distribution theory. +3.1 +High dimensional one-way MANOVA +Let {Yij}ni +j=1, i = 1, . . . , K, be K ≥ 2 independent samples following the model +Yij = µi + Vij (j = 1, . . . , ni; i = 1, . . . , K), +9 + +where µ1, . . . , µK ∈ Rd are unknown mean vectors of interest, {Vij}j∈N are i.i.d. d-dimensional random +vectors with E(Vi1) = 0 and cov(Vi1) = Σ. 
We are interested in testing the equality of the K mean vectors, +namely, testing the hypotheses +H0 : µ1 = . . . = µK versus H1 : µi ̸= µl for some 1 ≤ i ̸= l ≤ K. +Following the construction of (2.9), we propose the U type test statistic +UnK = +K +� +i=1 +Pii,K +ni +� +j=1 +� +k̸=j +Y⊤ +ijYik + +K +� +i=1 +� +l̸=i +Pil,K +ni +� +j=1 +nl +� +k=1 +Y⊤ +ijYlk, +(3.1) +where n = �K +i=1 ni is the total sample size, +Pii,K = +1 +n − 2 +� n +ni +− n + K − 2 +n − 1 +� +and Pil,K = +1 +n − 2 +� 1 +ni ++ 1 +nl +− n + K − 2 +n − 1 +� +. +In the context of two sample test for mean vectors where K = 2, UnK in (3.1) reduces to +UnK = +�n1 +i=1 +� +j̸=i +�n2 +k=1 +� +l̸=k(Y1i − Y2k)⊤(Y1j − Y2l) +(n − 1)(n − 2)n1n2/n +, +which coincides with the commonly used U type test statistic (Chen and Qin, 2010). +For each i ∈ {1, . . . , K}, let {Zij}j∈N be i.i.d. centered Gaussian random vectors with covariance matrix +cov(Zij) = Σ. Following (2.3), the Gaussian analogue of UnK is defined by +GnK = +K +� +i=1 +Pii,K +ni +� +j=1 +� +k̸=j +Z⊤ +ijZik + +K +� +i=1 +� +l̸=i +Pil,K +ni +� +j=1 +nl +� +k=1 +Z⊤ +ijZlk. +Let nmin = min1≤l≤K nl. Since max1≤i≤n Pii ≤ n−1 +min, Assumption 2.1 holds as long as nmin → ∞. The +following proposition establishes a non-asymptotic upper bound on the Kolmogorov distance between the +distribution functions of UnK and GnK. +Proposition 3.1. Let q = 2 + δ for some 0 < δ ≤ 1. Assume that nmin → ∞ and +� +Mq = +max +1≤l,l′≤K E +���� +V⊤ +l1Vl′2 +ς +���� +q +< ∞, where ς = |Σ|F. +Then, under the null hypothesis, we have +ρ(UnK, GnK) ≤ Cq +� +� +Mqn−δ/2 +min +�1/(2q+1) +→ 0. +Remark 8. It is worth mentioning that both the dimension d and the number of groups K can grow with the +total sample size n. In particular, as discussed in Remark 4, if all the K samples follow the linear process +model in (2.6), ρ(UnK, GnK) → 0 as long as nmin → ∞. +10 + +3.2 +High dimensional nonparametric one-way MANOVA +For each i ∈ {1, . . . , K}, let Fi denote the distribution function of Yi1. We consider testing whether these +K independent samples are equally distributed, namely, testing the hypotheses +H0 : F1 = . . . = FK versus H1 : Fi ̸= Fl for some 1 ≤ i ̸= l ≤ K. +(3.2) +Being fundamental and important in statistical inference, (3.2) has been extensively studied; see, for example, +Kruskal and Wallis (1952), Akritas and Arnold (1994), Brunner and Puri (2001), Rizzo and Sz´ekely (2010) +and Thas (2010) among many others. However, all the aforementioned works mainly focus on the traditional +low dimensional scenario and testing (3.2) for high dimensional random vectors has been much less studied. +In this section, we propose a new U type test statistic for (3.2) following the intuition of (2.9) and establish +the corresponding distributional theory. In particular, our asymptotic framework is fairly general and allows +both the dimension d and the number of groups K to grow with n. +To begin with, for each i ∈ {1, . . . , K}, let φi(t) = E{exp(ıt⊤Yij)} denote the characteristic function of +Yij, where ı stands for the imaginary unit. Then it is equivalent to test the hypotheses +H0 : φ1 = . . . = φK versus H1 : φi ̸= φl for some 1 ≤ i ̸= l ≤ K. +(3.3) +Denote Yij(t) = exp(ıt⊤Yij). Similar to (3.1), our test statistic for (3.3) is defined by +�UnK = +K +� +i=1 +Pii,K +ni +� +j=1 +� +k̸=j +� +Yij(t)Yik(t)w(t)dt + +K +� +i=1 +� +l̸=i +Pil,K +ni +� +j=1 +nl +� +k=1 +� +Yij(t)Ylk(t)w(t)dt, +where w(t) ≥ 0 is a suitable weight function such that the integrals above are well defined. 
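Both statistics in this section are straightforward to implement once the weights Pii,K and Pil,K are available. As an illustration, the sketch below codes the mean-vector statistic UnK from (3.1) for samples supplied as a list of (ni × d) arrays; the function name and the input format are our own choices.
```python
import numpy as np

def one_way_manova_UnK(samples):
    """U type statistic UnK from (3.1) for K independent samples of
    d-dimensional observations, each given as an (n_i x d) array."""
    K = len(samples)
    n_sizes = np.array([s.shape[0] for s in samples])
    n = int(n_sizes.sum())

    def P_same(ni):        # P_{ii,K} in (3.1)
        return ((n / ni) - (n + K - 2) / (n - 1)) / (n - 2)

    def P_cross(ni, nl):   # P_{il,K} in (3.1)
        return ((1 / ni) + (1 / nl) - (n + K - 2) / (n - 1)) / (n - 2)

    UnK = 0.0
    for i, Yi in enumerate(samples):
        G = Yi @ Yi.T                                          # Gram matrix of sample i
        UnK += P_same(n_sizes[i]) * (G.sum() - np.trace(G))    # within-sample pairs j != k
        for l, Yl in enumerate(samples):
            if l != i:
                UnK += P_cross(n_sizes[i], n_sizes[l]) * float((Yi @ Yl.T).sum())
    return UnK
```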
Discussions of +some commonly used weight functions are given in Remark 9 below. +Before proceeding, we first define the Gaussian analogue of �UnK under the null hypothesis that the K +samples are equally distributed. Define the covariance function of Y11(t) as +Σ(t, s) = E{Y11(t) − φ1(t)}{Y11(s) − φ1(s)} = φ1(t − s) − φ1(t)φ1(−s) (t, s ∈ Rd). +Throughout this section, by Mercer’s theorem, we assume that the covariance function above admits the +following eigendecomposition +Σ(t, s) = +∞ +� +m=1 +λmϕm(t)ϕm(s) (t, s ∈ Rd), +where λ1 ≥ λ2 ≥ . . . ≥ 0 are eigenvalues and ϕ1, ϕ2, . . ., are the corresponding eigenfunctions. We now +apply the Karhunen–Lo`eve theorem. Let {Zijk}i,j,k∈N be independent standard normal random variables +and define Gaussian processes +Zij(t) = +∞ +� +m=1 +√λmZijmϕm(t) (t ∈ Rd). +Then, following (2.3), the Gaussian analogue of �UnK is defined by +�GnK = +K +� +i=1 +Pii,K +ni +� +j=1 +� +k̸=j +� +Zij(t)Zik(t)w(t)dt + +K +� +i=1 +� +l̸=i +Pil,K +ni +� +j=1 +nl +� +k=1 +� +Zij(t)Zlk(t)w(t)dt. +11 + +Proposition 3.2. Let q = 2 + δ for some 0 < δ ≤ 1. Assume that nmin → ∞ and +� +Mq = E +����� +� +Rd E{Y11(t)}E0{Y12(t)}w(t)dt +F +����� +q +< ∞, where F2 = +∞ +� +m=1 +λ2 +m. +Then, under the null hypothesis that these K independent samples are equally distributed, we have +ρ(�UnK, �GnK) ≤ Cq +� +� +Mqn−δ/2 +min +�1/(2q+1) +→ 0. +Remark 9. It is worth mentioning that the proposed test statistic �UnK contains high dimensional integral +over t ∈ Rd, which can be computational intractable in practice. To make �UnK well defined and facilitate +the computation, we shall choose suitable weight function w(t) such that �UnK has a simple closed-form +expression. In the literature, various kinds of weight functions have been proposed such as the Gaussian +kernel function (Gretton et al., 2012), the Laplace kernel function (Gretton et al., 2012) and the energy +kernel function (Sz´ekely et al., 2007, Rizzo and Sz´ekely, 2010). For instance, let w(t) denote the density +function of the random vector Xκ/√η for some κ > 0, where X ∼ N(0, Id) and η ∼ χ2 +1 are independent +(equivalently Xκ/√η is a Cauchy random variable with location parameter 0 and scale parameter κ). Then +it is straightforward to verify that +� +Yij(t)Ylk(t)w(t)dt = +� +cos{t⊤(Yij − Ylk)}w(t)dt = exp(−κ|Yij − Ylk|), +which is the same as the Laplace kernel function with 1/κ being its bandwidth, where | · | stands for the +Euclidean distance. A more general result can be derived using Bochner’s Theorem, see e.g., Theorem 3.1 +of Gretton et al. (2009). Consequently, the proposed test statistic �UnK reduces to +�UnK = +K +� +i=1 +Pii,K +Ni +� +j=1 +� +k̸=j +exp(−κ|Yij − Yik|) + +K +� +i=1 +� +l̸=i +Pil,K +Ni +� +j=1 +Nl +� +k=1 +exp(−κ|Yij − Ylk|), +which is fairly convenient to compute in practice. Moreover, suitable choice of the weight function w(t) also +facilitate the analysis of the quantities Mq and F. +4 +Practical implementation +In this section, we propose an unbiased estimator for ς2, which is ratio-consistent under fairly mild moment +conditions. To begin with, since E(V ⊤ +i Vj)2 = ς2 for any i ̸= j, a natural unbiased U type estimator for ς2 +based on {Vi}n +i=1 would be +�ς2 +o = +1 +n(n − 1) +n +� +i=1 +� +j̸=i +(V ⊤ +i Vj)2. +(4.1) +Let ¯P1 = In − X(X⊤X)−1X⊤ = (Pij,1)n×n and �V = ¯P1Y = (�V1, . . . , �Vn)⊤. It is worth noting that directly +substituting the residual vectors {�Vi}n +i=1 into (4.1) yields a feasible but generally biased estimator for ς2. 
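As an illustration of this point, the sketch below implements the oracle estimator (4.1), its residual plug-in, and the sample-splitting estimator ς̂²_A = tr(Σ̂_AΣ̂_Ac), whose three-step construction is detailed in the next paragraph. The random even split and the helper names are our own choices, and the snippet is illustrative rather than optimized.
```python
import numpy as np

def residuals(X, Y):
    """Least squares residual matrix (I - X (X'X)^{-1} X') Y."""
    H = X @ np.linalg.inv(X.T @ X) @ X.T
    return (np.eye(X.shape[0]) - H) @ Y

def sigma2_oracle(V):
    """Oracle U type estimator (4.1), based on the (unobservable) errors V."""
    n = V.shape[0]
    G = V @ V.T
    off_diag_sq = (G ** 2).sum() - (np.diag(G) ** 2).sum()
    return off_diag_sq / (n * (n - 1))

def sigma2_plug_in(X, Y):
    """Plug-in version of (4.1) using residuals; feasible but generally biased,
    as discussed in the text."""
    return sigma2_oracle(residuals(X, Y))

def sigma2_split(X, Y, rng=None):
    """Sample-splitting estimator sigma2_A = tr(Sigma_A_hat Sigma_Ac_hat);
    the sample size is assumed even, as in the text."""
    rng = np.random.default_rng() if rng is None else rng
    n, p = X.shape
    idx = rng.permutation(n)
    A, Ac = idx[: n // 2], idx[n // 2:]
    VA, VAc = residuals(X[A], Y[A]), residuals(X[Ac], Y[Ac])
    SA = VA.T @ VA / (len(A) - p)
    SAc = VAc.T @ VAc / (len(Ac) - p)
    return float(np.sum(SA * SAc))   # equals tr(SA @ SAc) since both matrices are symmetric
```
The bias of the residual plug-in is quantified next.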
+More specifically, for any i ̸= j, +E(�V ⊤ +i �Vj)2 = ( ¯Pii,1 ¯Pjj,1 + ¯P 2 +ij,1)ς2 + ¯P 2 +ij,1E(V ⊤ +1 V1)(V ⊤ +2 V2) + +n +� +k=1 +( ¯Pik,1 ¯Pjk,1)2 � +∥E0(V ⊤ +1 V1)∥2 +2 − 2ς2� +, +12 + +which reveals that (�V ⊤ +i �Vj)2 is no longer unbiased of ς2 even after proper scaling. This motivates us to +propose a new unbiased estimator for ς2 via data-splitting, which excludes the bias terms (V ⊤ +i Vi)2 and +(V ⊤ +i Vi)(V ⊤ +j Vj). Without loss of generality, we assume that the sample size n is even in what follows. +1. Randomly split {1, . . . , n} into two halves A and Ac. Denote MA = {(Xi, Yi), i ∈ A} and MAc = +{(Xi, Yi), i ∈ Ac}. +2. For both MA and MAc, fit model (1.1) with the least squares estimates and compute +�ΣA = +1 +n/2 − p +�V ⊤ +A �VA and �ΣAc = +1 +n/2 − p +�V ⊤ +Ac �VAc, +where �VA and �VAc are the residual matrices of MA and MAc, respectively. +3. Compute the estimator �ς2 +A = tr(�ΣA�ΣAc). +Since �ΣA and �ΣAc are independent and both of them are unbiased estimators of Σ, �ς2 +A is unbiased for ς2 as +E(�ς2 +A) = tr{E(�ΣA)E(�ΣAc)} = tr(Σ2) = ς2. +Theorem 4.1. Assume that p/n < ϖ2 for some positive constant ϖ2 < 1/2 and that the least squares +estimates are well defined for both MA and MAc. Then we have +E +���� +�ςA +ς − 1 +���� +2 +≲ M4 +n2 + p × tr(Σ4) +n2ς4 ++ ∥E0(V ⊤ +1 ΣV1)∥2 +2 +nς4 +. +Remark 10. The proof of Theorem 4.1 is given in Section 7.2, where a more general upper bound on +E|�ςA/ς −1|τ is established for 1 < τ ≤ 2. Theorem 4.1 reveals that �ςA is ratio consistent under mild moment +conditions. Suppose now {Vi}i∈N follow the linear process model (2.6) with max1≤ℓ≤L E|ξiℓ|4 ≤ C < ∞. +Then M4 is bounded and ∥E0(V ⊤ +1 ΣV1)∥2 +2 ≲ tr(Σ4). Consequently, +E +���� +�ςA +ς − 1 +���� +2 +≲ n−2 + tr(Σ4) +nς4 . +In this case, �ςA is ratio consistent for arbitrary dimension d ≥ 1 as long as n → ∞. +Remark 11. There are totally +� n +n/2 +� +different ways of splitting {1, . . . , n} into two halves. To reduce the +influence of randomness of an arbitrary splitting, we can repeat the procedure independently for multiple +times and then take the average of the resulting estimators. We refer to Fan et al. (2012) for more discussions +about data-splitting and repeated data-splitting. +Remark 12. Let �Σ = (n − p)−1 �V ⊤ �V . Observe that E(�V ⊤ +i �Vj) = ¯Pij,1tr(Σ). We can estimate ς2 via +�ς2 +S = +�n +i,j=1 |�V ⊤ +i �Vj − ¯Pij,1tr(�Σ)|2 +(n − p + 2)(n − p − 1) += +(n − p)2 +(n − p + 2)(n − p − 1) +� +|�Σ|2 +F − {tr(�Σ)}2 +n − p +� +, +which is same as the estimator proposed in Srivastava and Fujikoshi (2006), where {Vi}n +i=1 are assumed to +be Gaussian random vectors. See also Bai and Saranadasa (1996). However, for non-Gaussian {Vi}n +i=1 such +that ∥E0(V ⊤ +1 V1)∥2 +2 ̸= 2ς2, this estimator is generally biased as +E(�ς2 +S) − ς2 = +�n +i=1 ¯P 2 +ii,1 +(n − p)(n − p + 2) +� +∥E0(V ⊤ +1 V1)∥2 +2 − 2ς2� +. +In particular, the bias of �ς2 +S can diverge when ∥E0(V ⊤ +1 V1)∥2 +2 is much larger than ς2. Below we provide an +example that typifies the diverging bias. +13 + +G +G +G +G +G +G +2 +4 +6 +8 +10 +12 +0.0 +0.5 +1.0 +1.5 +2.0 +2.5 +3.0 +3.5 +d × 100 +G +Split +SF +Oracle +G +G +G +G +G +G +2 +4 +6 +8 +10 +12 +0.0 +0.5 +1.0 +1.5 +2.0 +d × 100 +Figure 1: Empirical averages of the values of |�ς/ς − 1| +Example 4.1. Let {ξi}i∈N and {ξ′ +i}i∈N be two sequences of independent Gaussian random vectors N(0, Σ), +where Σ = (Σij)n×n has entries Σij = ϑ|i−j| for some ϑ ∈ (0, 1). Following Wang et al. (2015), we draw +i.i.d. 
innovations {Vi}n +i=1 from a scale mixture of two independent multivariate Gaussian distributions as +follows, +Vi = νi × ξi + 3(1 − νi) × ξ′ +i (i = 1, . . . , n), +where {νi}i∈N are independent Bernoulli random variables with P(νi = 1) = 0.9. A simulation study is given +in Section 5 by setting ϑ = 0.3 and 0.7. We report in Figure 1 the average values of |�ς/ς −1| for �ςA, �ςo and �ςS, +based on 1000 replications with the numerical setup (n, p, m) = (100, 20, 10) and d = 200, 400, 800, 1000, 1200. +For both cases of ϑ, |�ςA/ς −1| and |�ςo/ς −1| are very close to 0, while |�ςS/ς −1| is quite large. More precisely, +we can derive that ∥E0(V ⊤ +1 V1)∥2 +2 ≈ (18 + d)ς2. +Substituting the ratio-consistent estimator �ς2 +A into var(Un) = 2|Pθ|2 +Fς2 yields Un/(�ςA|Pθ|F) ⇒ N(0, 2) +under (2.16). Then, for α ∈ (0, 1), an asymptotic α level test is given by +ΦZ = I +� +Un +�ςA|Pθ|F +√2 > z1−α +� +, +(4.2) +where z1−α is the (1 − α)th quantile of the standard normal distribution. +5 +A simulation study +In this section, we conduct a Monte Carlo simulation study to assess the finite sample performance of +the proposed tests. +In the model (1.1), we write Xi = (1, x⊤ +i )⊤ ∈ Rp to include an intercept. +Here +x1, . . . , xn ∈ Rp−1 are i.i.d. N(0, Ip−1) random vectors. Let m < p. For all k ∈ {1, . . . , p − m}, all entries +of the coefficient vector Bk are i.i.d. uniform random variables in the interval (1, 2). After those Bk’s are +generated, we keep their values throughout the simulation. Our goal is to identify the zero Bk’s by testing +H0 : Bp−m+1 = Bp−m+2 = · · · = Bp = 0. +14 + +In our simulation, we set (p, m) = (20, 10), n = 100, 200 and d = 400, 800, 1200. We consider two different +designs of the innovations (Vi): the one introduced in Example 4.1 and the one in Example 5.1 below. In +both examples, the parameter ϑ is set to be 0.3 and 0.7. +Example 5.1. Let {ξij}i,j∈N be i.i.d. random variables with E(ξ11) = 0 and var(ξ11) = 1. In particular, we +consider two cases for (ξij); they are drawn from the standardized t5 distribution and the standardized χ2 +5 +distribution, respectively. For some ϑ ∈ (0, 1), we generate +Vi = √(1 − ϑ) × ξi + √ϑ × (ξi0, ξi0, . . . , ξi0)⊤, i ∈ N. +We shall apply a Gaussian multiplier bootstrap approach to implement our proposed test. The procedure +is as follows. +1. Compute the residual matrix �V = (�V1, . . . , �Vn)⊤ = ¯P1Y . Generate i.i.d. N(0, 1) random variables +{ωij}i,j∈N and compute the bootstrap residuals V ⋆ = (V ⋆ +1 , . . . , V ⋆ +n )⊤, where +V ⋆ +i = +1 +√(n − p) +n +� +j=1 +ωij �Vi (i = 1, . . . , n). +2. Use V ⋆ to compute �ς⋆ +A and the bootstrap test statistic U ⋆ +n = tr(V ⋆⊤PθV ⋆). +3. Repeat the first two steps independently B times and collect U ⋆ +nk and �ς⋆ +Ak, k = 1, . . . , B. +4. Let �c1−α be the (1 − α)th quantile of {U ⋆ +nk/(�ς⋆ +Ak|Pθ|F +√2)}k=1,...,B. The our test is +ΦB = I +� +Un +�ςA|Pθ|F +√2 > �c1−α +� +, +(5.1) +and we shall reject the null hypothesis whenever ΦB = 1. +Similar to Gn, U ⋆ +n is a quadratic functional of i.i.d. Gaussian random vectors conditional on {X, Y } and +is distributed as a linear combination of independent chi-squared random variables. To justify the validity +of the proposed Gaussian multiplier bootstrap approach, it suffices to bound the distance between the +distribution functions of these two quadratic functionals, which can be established by verifying the normalized +consistency (Xu et al., 2014) of the corresponding covariance matrix. 
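A compact sketch of the four bootstrap steps above is given below. It reuses modified_Un, residuals and sigma2_split from the earlier snippets, follows step 1 exactly as written, and treats the instruction "use V⋆ to compute ς̂⋆_A" as re-applying the splitting estimator with V⋆ in place of the response. That reading, the p-value form of step 4, and the function name are assumptions of this sketch, not prescriptions of the paper.
```python
import numpy as np

def multiplier_bootstrap_pvalue(X, Y, C, B_boot=1000, rng=None):
    """Gaussian multiplier bootstrap calibration of the standardized statistic
    Un / (sigma_A_hat * |P_theta|_F * sqrt(2)), sketching steps 1-4 above.
    Relies on modified_Un(), residuals() and sigma2_split() defined earlier."""
    rng = np.random.default_rng() if rng is None else rng
    n, p = X.shape

    Un, P_theta_F = modified_Un(X, Y, C)
    T_obs = Un / (np.sqrt(sigma2_split(X, Y, rng)) * P_theta_F * np.sqrt(2))

    V_hat = residuals(X, Y)
    T_star = np.empty(B_boot)
    for b in range(B_boot):
        # Step 1 as written: V*_i = (n - p)^{-1/2} sum_j omega_ij * V_hat_i.
        omega = rng.standard_normal((n, n))
        V_star = (omega.sum(axis=1) / np.sqrt(n - p))[:, None] * V_hat
        # Step 2: bootstrap statistic U*_n = tr(V*' P_theta V*) and variance estimate.
        Un_star, _ = modified_Un(X, V_star, C)
        # One possible reading of "use V* to compute sigma*_A": re-apply the
        # sample-splitting estimator with V* playing the role of the response.
        sig2_star = sigma2_split(X, V_star, rng)
        T_star[b] = Un_star / (np.sqrt(sig2_star) * P_theta_F * np.sqrt(2))

    # Steps 3-4: compare with the bootstrap quantile, reported here as a p-value.
    return float(np.mean(T_star >= T_obs))
```
The formal justification of this calibration again comes down to the normalized-consistency condition mentioned above.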
However, this can be highly non-trivial +in the high dimensional setting and is beyond the scope of current paper. Hence we leave it for future work. +In our simulation, we set the bootstrap size B = 1000. As comparison, we also perform the test suggested +in (4.2) based on the central limit theorem and the one proposed in Srivastava and Kubokawa (2013) which +we denote by SK. For each test, we report the empirical size based on 2000 replications as displayed in +Table 1 and Table 2. The results suggest that our proposed test by using the bootstrap procedure provides +the best size accuracy in general as the empirical sizes are close to the nominal level α. +For Example 4.1, both of the test by CLT and our Gaussian multiplier bootstrap method have better +performance than the SK test since the latter is too conservative as d is large. +As expected from our +theoretical results, normal approximation can work reasonably well in this design. +For Example 5.1, the Gaussian multiplier bootstrap method outperforms other two procedures in size +accuracy for all cases. The SK test suffers from size distortion. The test by CLT inflates the size more than +15 + +Table 1: Empirical sizes for Example 4.1 with α = 0.05 +θ = 0.3 +θ = 0.7 +n +d +CLT +GMB +SK +CLT +GMB +SK +100 +400 +0.057 +0.047 +0.041 +0.059 +0.051 +0.036 +800 +0.049 +0.045 +0.033 +0.063 +0.056 +0.026 +1200 +0.062 +0.055 +0.021 +0.048 +0.045 +0.028 +200 +400 +0.056 +0.052 +0.042 +0.052 +0.047 +0.037 +800 +0.052 +0.049 +0.037 +0.053 +0.050 +0.033 +1200 +0.045 +0.044 +0.029 +0.050 +0.046 +0.035 +Table 2: Empirical sizes for Example 5.1 with α = 0.05 +t5 +χ2 +5 +θ +n +d +CLT +GMB +SK +CLT +GMB +SK +0.3 +100 +400 +0.068 +0.058 +0.023 +0.083 +0.065 +0.036 +800 +0.082 +0.066 +0.023 +0.074 +0.058 +0.016 +1200 +0.082 +0.068 +0.015 +0.067 +0.053 +0.011 +200 +400 +0.073 +0.059 +0.022 +0.067 +0.054 +0.018 +800 +0.071 +0.057 +0.012 +0.074 +0.058 +0.014 +1200 +0.076 +0.059 +0.011 +0.077 +0.058 +0.011 +0.7 +100 +400 +0.074 +0.055 +0.002 +0.082 +0.062 +0.002 +800 +0.084 +0.066 +0.001 +0.085 +0.071 +0.000 +1200 +0.073 +0.057 +0.000 +0.076 +0.062 +0.001 +200 +400 +0.083 +0.067 +0.001 +0.080 +0.064 +0.000 +800 +0.068 +0.050 +0.000 +0.075 +0.062 +0.000 +1200 +0.070 +0.051 +0.001 +0.074 +0.056 +0.000 +16 + +the GMB method, which can be explained by the fact that condition (3.1) does not hold and the CLT for +Un fails. More specifically, for both θ = 0.3 and θ = 0.7, elementary calculations show that λ1(Σ)/ς → 1. +As a result, (2.16) is violated as m = 10; see also the comment at the end of Section 2.2 for discussion on +the non-normality of Un. To have more insight, we display in Figure 2 the density plots of Un/√var(Un) for +n = 100 as well as the density of N(0, 1). As we can see from the plots, the distribution of Un/√var(Un) is +skewed to the right for all cases, which explains the inflated sizes of the CLT test. +More simulation studies on power comparison of these three tests are conducted in Section 7.1. +Figure 2: Density plots of Un/√var(Un) and N(0, 1) +−4 +−2 +0 +2 +4 +0.0 +0.1 +0.2 +0.3 +0.4 +0.5 +Density +d=400 +d=800 +d=1200 +Normal +−4 +−2 +0 +2 +4 +0.0 +0.1 +0.2 +0.3 +0.4 +0.5 +−4 +−2 +0 +2 +4 +0.0 +0.1 +0.2 +0.3 +0.4 +0.5 +x +Density +−4 +−2 +0 +2 +4 +0.0 +0.1 +0.2 +0.3 +0.4 +0.5 +x +density +6 +Data analysis +We apply the proposed method to two data sets. Our first dataset came from a study of the impact of the +gut microbiome on host serum metabolome and insulin sensitivity in non-diabetic Danish adults (Pedersen +et al., 2016). 
It consists of measurements of 1201 metabolites (325 serum polar metabolites and 876 serum +molecular lipids) on 289 serum samples using mass spectrometry. +The cleaned dataset was downloaded +from https://bitbucket.org/hellekp/clinical-micro-meta-integration (Pedersen et al., 2018). We use this data +set to identify insulin resistance (IR)-associated metabolites. IR was estimated by the homeostatic model +assessment (Pedersen et al., 2016). Body mass index (BMI) is a confounder for this dataset since it is highly +correlated with IR (Spearman’s ρ = 0.67) and is known to affect the serum metabolome. Two samples +without IR measurement were excluded. For metabolites with zero measurements, zeros were replaced by +half of the minimal nonzero value. Log transformation was performed to make the data more symmetrically +distributed before analysis. The p-values associated with the three methods (GLT, GMB, and SK) are all +17 + +very close to zero, indicating a strong dependence between metabolites and IR. We further perform a linear +regression analysis on each metabolite using IR and BMI as the covariates. Figure 3 (left panel) presents +the histogram of the p-values on testing the significance of the coefficients associated with IR. We see a +high peak close to zero, which provides strong evidence on the association between metabolites and IR. We +further apply the Holm–Bonferroni procedure to the p-values to control the family-wise error rate at the 5% +level, resulting in 164 discoveries. +Our second dataset is from the study of the smoking effect on the human upper respiratory tract (Charlson +et al., 2010). The original data set contains samples from both throat and nose microbiomes and both body +sides. Here we focus on the throat microbiome of the left body side, which includes 60 subjects consisting of 32 +nonsmokers and 28 smokers. More precisely, the data set is presented as a 60×856 abundance table recording +the frequencies of detected operational taxonomic units (OTUs) in the samples using the 16S metagenomics +approach, together with a metadata table capturing the sample-level information, including the smoking +status and sex. We transform the OTU abundance using center log-ratio (CLR) transformation after adding +a pseudo-count of 0.5 to the zero counts. Our goal is to test the association of throat microbiomes with +smoking status adjusting for sex. The proposed method using either the normal approximation or bootstrap +approximation detects a strong association between the throat microbiomes with smoking status. In contrast, +the SK method fails to discover the association. +We further perform an OTU-wise linear regression analysis using each OTU (after the CLR transfor- +mation) as the response and the smoking status and sex as covariates. Figure 3 (right panel) presents the +histogram of the p-values for testing the association between each OTU and smoking status after adjusting +sex in each linear regression. Interestingly, adjusting the multiplicity using either the Holm–Bonferroni pro- +cedure or the BH procedure at the 5% level gives zero discovery (Zhou et al., 2021). These results suggest +that the association between individual OTU and smoking status is weak. However, after aggregating the +weak effects from all the OTUs, the combined effect is strong enough to be detected by the proposed method. +Table 3: P-values of the three methods applying to the metabolomics and microbiome data sets. 
+Metabolomics +Microbiome +CLT +GMB +SK +CLT +GMB +SK +p-value +0.00 +0.00 +0.00 +9.7 × 10−6 +0.002 +0.13 +References +Michael G Akritas and Steven F Arnold. Fully nonparametric hypotheses for factorial designs I: Multivariate +repeated measures designs. J. Amer. Statist. Assoc., 89(425):336–343, 1994. 11 +T. W. Anderson. +An introduction to multivariate statistical analysis. +Wiley Series in Probability and +Statistics. 2003. 2 +Zhidong Bai and Hewa Saranadasa. Effect of high dimension: by an example of a two sample problem. +Statist. Sinica, 6(2):311–329, 1996. 2, 9, 13 +18 + +Figure 3: Histograms of the p-values for testing the association between individual omics feature and the +variable of interest after adjusting for the confounder. +Metabolomics +Microbiome +0.00 +0.25 +0.50 +0.75 +1.00 +0.00 +0.25 +0.50 +0.75 +1.00 +0.0% +5.0% +10.0% +15.0% +pvalue +count/sum(count) +Edgar Brunner and Madan L. Puri. Nonparametric methods in factorial designs. Statist. Papers, 42(1):1–52, +2001. 11 +T. Tony Cai and Zongming Ma. +Optimal hypothesis testing for high dimensional covariance matrices. +Bernoulli, 19(5B):2359–2388, 2013. 9 +T. Tony Cai and Yin Xia. High-dimensional sparse MANOVA. J. Multivariate Anal., 131:174–196, 2014. 2 +Emily S Charlson, Jun Chen, Rebecca Custers-Allen, Kyle Bittinger, Hongzhe Li, Rohini Sinha, Jennifer +Hwang, Frederic D Bushman, and Ronald G Collman. Disordered microbial communities in the upper +respiratory tract of cigarette smokers. PloS one, 5(12):e15216, 2010. 18 +Song Xi Chen and Ying-Li Qin. A two-sample test for high-dimensional data with applications to gene-set +testing. Ann. Statist., 38(2):808–835, 2010. 2, 9, 10 +Xiaohui Chen. Gaussian and bootstrap approximations for high-dimensional U-statistics and their applica- +tions. Ann. Statist., 46(2):642–678, 2018. 3 +Jianqing Fan, Shaojun Guo, and Ning Hao. Variance estimation using refitted cross-validation in ultrahigh +dimensional regression. J. R. Stat. Soc. Ser. B. Stat. Methodol., 74(1):37–65, 2012. 13 +F. G¨otze and A. Tikhomirov. Asymptotic distribution of quadratic forms and applications. J. Theoret. +Probab., 15(2):423–475, 2002. 3 +F. G¨otze and A. N. Tikhomirov. Asymptotic distribution of quadratic forms. Ann. Probab., 27(2):1072–1098, +1999. 3 +19 + +Arthur Gretton, Kenji Fukumizu, and Bharath K Sriperumbudur. Discussion of: Brownian distance covari- +ance. Ann. Appl. Stat., 3(4):1285–1294, 2009. 12 +Arthur Gretton, Karsten M. Borgwardt, Malte J. Rasch, Bernhard Sch¨olkopf, and Alexander Smola. A +kernel two-sample test. J. Mach. Learn. Res., 13:723–773, 2012. 12 +Yinqiu He, Bo Meng, Zhenghao Zeng, and Gongjun Xu. On the phase transition of wilks’ phenomenon. +Biometrika, 108(3):741–748, 2021. 5 +Jiang Hu, Zhidong Bai, Chen Wang, and Wei Wang. On testing the equality of high dimensional mean +vectors with unequal covariance matrices. Ann. Inst. Statist. Math., 69(2):365–387, 2017. 2 +Peter J. Huber. Robust regression: asymptotics, conjectures and Monte Carlo. Ann. Statist., 1:799–821, +1973. 8 +William H Kruskal and W Allen Wallis. Use of ranks in one-criterion variance analysis. J. Amer. Statist. +Assoc., 47(260):583–621, 1952. 11 +Soumendra Nath Lahiri. Bootstrapping M-estimators of a multiple linear regression parameter. Ann. Statist., +20(3):1548–1570, 1992. 8 +Huiqin Li, Jiang Hu, Zhidong Bai, Yanqing Yin, and Kexin Zou. Test on the linear combinations of mean +vectors in high-dimensional data. TEST, 26(1):188–208, 2017. 2 +Enno Mammen. 
Asymptotics with increasing dimension for robust regression with applications to the boot- +strap. Ann. Statist., 17(1):382–400, 1989. 8 +William Navidi. Edgeworth expansions for bootstrapping regression models. Ann. Statist., 17(4):1472–1478, +1989. 8 +Helle Krogh Pedersen, Valborg Gudmundsdottir, Henrik Bjørn Nielsen, Tuulia Hyotylainen, Trine Nielsen, +Benjamin AH Jensen, Kristoffer Forslund, Falk Hildebrand, Edi Prifti, Gwen Falony, et al. Human gut +microbes impact host serum metabolome and insulin sensitivity. Nature, 535(7612):376–381, 2016. 17 +Helle Krogh Pedersen, Sofia K Forslund, Valborg Gudmundsdottir, Anders Østergaard Petersen, Falk Hilde- +brand, Tuulia Hy¨otyl¨ainen, Trine Nielsen, Torben Hansen, Peer Bork, S Dusko Ehrlich, et al. A com- +putational framework to integrate high-throughput ‘-omics’ datasets for the identification of potential +mechanistic links. Nature protocols, 13(12):2781–2800, 2018. 17 +Stephen Portnoy. Asymptotic behavior of M estimators of p regression parameters when p2/n is large. II. +Normal approximation. Ann. Statist., 13(4):1403–1417, 1985. 8 +Maria L. Rizzo and G´abor J. Sz´ekely. DISCO analysis: a nonparametric extension of analysis of variance. +Ann. Appl. Stat., 4(2):1034–1055, 2010. 11, 12 +James R. Schott. +Some high-dimensional tests for a one-way MANOVA. +J. Multivariate Anal., 98(9): +1825–1839, 2007. 2 +20 + +Jun Shao. On resampling methods for variance and bias estimation in linear models. Ann. Statist., 16(3): +986–1008, 1988. 8 +Jun Shao and C.-F. J. Wu. Heteroscedasticity-robustness of jackknife variance estimators in linear models. +Ann. Statist., 15(4):1563–1579, 1987. 8 +Muni S. Srivastava and Yasunori Fujikoshi. Multivariate analysis of variance with fewer observations than +the dimension. J. Multivariate Anal., 97(9):1927–1940, 2006. 13 +Muni S. Srivastava and Tatsuya Kubokawa. Tests for multivariate analysis of variance in high dimension +under non-normality. J. Multivariate Anal., 115:204–216, 2013. 15 +Muni S. Srivastava, Shota Katayama, and Yutaka Kano. A two sample test in high dimensional data. J. +Multivariate Anal., 114:349–358, 2013. 2 +G´abor J. Sz´ekely, Maria L. Rizzo, and Nail K. Bakirov. Measuring and testing dependence by correlation of +distances. Ann. Statist., 35(6):2769–2794, 2007. 12 +Olivier Thas. Comparing distributions. Springer Series in Statistics. Springer, New York, 2010. 11 +Lan Wang, Bo Peng, and Runze Li. A high-dimensional nonparametric multivariate test for mean vector. +J. Amer. Statist. Assoc., 110(512):1658–1669, 2015. 14 +Jennifer Wessel and Nicholas J Schork. Generalized genomic distance–based regression methodology for +multilocus association analysis. The American Journal of Human Genetics, 79(5):792–806, 2006. 2 +C.-F. J. Wu. Jackknife, bootstrap and other resampling methods in regression analysis. Ann. Statist., 14 +(4):1261–1350, 1986. With discussion and a rejoinder by the author. 8 +Mengyu Xu, Danna Zhang, and Wei Biao Wu. L2 asymptotics for high-dimensional data. arXiv preprint +arXiv:1405.7244, 2014. 15 +Mengyu Xu, Danna Zhang, and Wei Biao Wu. Pearson’s chi-squared statistics: approximation theory and +beyond. Biometrika, 106(3):716–723, 2019. 5 +Shun Yao, Xianyang Zhang, and Xiaofeng Shao. Testing mutual independence in high dimension via distance +covariance. J. R. Stat. Soc. Ser. B. Stat. Methodol., 80(3):455–480, 2018. 9 +Matthew A. Zapala and Nicholas J. Schork. 
Matthew A. Zapala and Nicholas J. Schork. Multivariate regression analysis of distance matrices for testing associations between gene expression patterns and related variables. Proceedings of the National Academy of Sciences, 103(51):19430–19435, 2006.

Matthew A. Zapala and Nicholas J. Schork. Statistical properties of multivariate distance matrix regression for high-dimensional data analysis. Frontiers in Genetics, 3:190, 2012.

Jin-Ting Zhang, Jia Guo, and Bu Zhou. Linear hypothesis testing in high-dimensional one-way MANOVA. J. Multivariate Anal., 155:200–216, 2017.

Xianyang Zhang, Shun Yao, and Xiaofeng Shao. Conditional mean and quantile dependence testing in high dimension. Ann. Statist., 46(1):219–246, 2018.

Bu Zhou, Jia Guo, and Jin-Ting Zhang. High-dimensional general linear hypothesis testing under heteroscedasticity. J. Statist. Plann. Inference, 188:36–54, 2017.

Huijuan Zhou, Kejun He, Jun Chen, and Xianyang Zhang. LinDA: Linear models for differential abundance analysis of microbiome compositional data. arXiv preprint arXiv:2104.00242, 2021.

diff --git a/69E2T4oBgHgl3EQf7ggl/content/tmp_files/load_file.txt b/69E2T4oBgHgl3EQf7ggl/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5cd17e0cfee5d9b421ae4988bab6f8fe2b9523d0
--- /dev/null
+++ b/69E2T4oBgHgl3EQf7ggl/content/tmp_files/load_file.txt
@@ -0,0 +1,1165 @@
filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf,len=1164

High Dimensional Analysis of Variance in Multivariate Linear Regression

Zhipeng Lou (Department of Operations Research and Financial Engineering, Princeton, NJ 08544),
Xianyang Zhang (Department of Statistics, Texas A&M University, College Station, TX 77843),
Wei Biao Wu (Department of Statistics, University of Chicago, Chicago, IL 60637)

January 12, 2023
arXiv:2301.04209v1 [stat.ME] 10 Jan 2023

Abstract

In this paper, we develop a systematic theory for high dimensional analysis of variance in multivariate linear regression, where the dimension and the number of coefficients can both grow with the sample size. We propose a new U type test statistic to test linear hypotheses and establish a high dimensional Gaussian approximation result under fairly mild moment assumptions. Our general framework and theory can be applied to deal with the classical one-way multivariate ANOVA and the nonparametric one-way MANOVA in high dimensions. To implement the test procedure in practice, we introduce a sample-splitting based estimator of the second moment of the error covariance and discuss its properties. A simulation study shows that our proposed test outperforms some existing tests in various settings.
Keywords: Data-splitting; Gaussian approximation; Multivariate analysis of variance; One-way layout; U statistics

1 Introduction

In statistical inference of multivariate linear regression, a fundamental problem is to investigate the relationships between the covariates and the responses. In this article, we aim to test whether a given set of covariates is associated with the responses by multivariate analysis of variance (MANOVA). To fix the idea, we consider the multivariate linear regression model with p predictors,

    Y_i = B^⊤ X_i + V_i   (i = 1, ..., n),    (1.1)

where Y_i = (Y_{i1}, ..., Y_{id})^⊤ and X_i = (X_{i1}, ..., X_{ip})^⊤ are respectively the response vector and the predictor vector for the ith sample, B^⊤ = (B_1, ..., B_p) is the unknown coefficient matrix with B_k ∈ R^d consisting of the coefficients on the kth covariate, and the innovation vectors
V_1, ..., V_n ∈ R^d are independent and identically distributed random vectors with E(V_1) = 0 and cov(V_1) = Σ. The first element of X_i can be set to be 1 to reflect an intercept term. Equivalently, we can write (1.1) in the compact matrix form

    Y = XB + V,    (1.2)

where Y = (Y_1, ..., Y_n)^⊤, X = (X_1, ..., X_n)^⊤ and V = (V_1, ..., V_n)^⊤.
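For concreteness, model (1.2) can be simulated in a few lines of numpy; the choices of n, p, d, B and Σ in the sketch below are purely illustrative and not part of the original text.

```python
import numpy as np

rng = np.random.default_rng(0)
n, p, d = 100, 3, 50          # sample size, number of predictors, response dimension

# Design matrix with an intercept in the first column.
X = np.column_stack([np.ones(n), rng.normal(size=(n, p - 1))])

# Coefficient matrix B (p x d) and error covariance Sigma (d x d).
B = rng.normal(size=(p, d))
A = rng.normal(size=(d, d)) / np.sqrt(d)
Sigma = A @ A.T               # a generic positive semi-definite covariance

# Innovations V_i ~ (0, Sigma), i.i.d. rows, and responses Y = X B + V.
V = rng.multivariate_normal(np.zeros(d), Sigma, size=n)
Y = X @ B + V
```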
Let C ∈ R^{m×p} be a matrix of rank m, where m ∈ {1, ..., p}. We are interested in testing a collection of linear constraints on the coefficient matrix,

    H_0 : CB = 0   versus   H_1 : CB ≠ 0.    (1.3)

This testing problem has been extensively studied in the low dimensional setting where both the number of predictors and the dimension of the response are relatively small compared to the sample size. A natural and popular choice is the classical likelihood ratio test when the errors are normally distributed; see Chapter 8 of Anderson (2003) for a review of theoretical investigations. In recent years, high dimensional data are increasingly encountered in various applications. Over the past decade, there have been tremendous efforts to develop new methodologies and theories for high dimensional regression. The paradigm where d is 1 or small and p can increase with n has received considerable attention, while the one where d is very large and p is relatively small has been less studied. The model (1.2) in the latter setting has been applied to a number of research problems involving high-dimensional data types such as DNA sequence data, gene expression microarray data, and imaging data; see for example Zapala and Schork (2006), Wessel and Schork (2006) and Zapala and Schork (2012).
Those related studies typically generate huge amounts of data (responses) that, due to their expense and sophistication, are often collected on a relatively small number of individuals, and investigate how the data can be explained by a certain number of predictor variables such as the ages of the individuals assayed, clinical diagnoses, strain memberships, cell line types, or genotype information (Zapala and Schork, 2006). Owing to the inappropriateness of applying the standard MANOVA strategy and the shortage of high-dimensional MANOVA theory, biological researchers have often resorted to some form of data reduction such as cluster analysis and factor analysis, which can suffer from many problems, as pointed out by Zapala and Schork (2012). In Zapala and Schork (2006, 2012), the authors incorporated a distance matrix to modify the standard MANOVA, but they commented that there is very little published material to guide a researcher as to which distance measure is the most appropriate for a given situation. Motivated by these real-world applications, we aim to develop a general methodology for high dimensional MANOVA and lay a theoretical foundation for assessing statistical significance.

The testing problem (1.3) for model (1.2) is closely related to a group of high dimensional hypothesis tests. The two-sample mean test, for testing H_0 : µ_1 = µ_2 where µ_1 ∈ R^d and µ_2 ∈ R^d are the mean vectors of two different populations, is a special case with p = 2, B = (µ_1, µ_2)^⊤ and C = (1, −1). There is a large literature accommodating the Hotelling T^2 type statistic to the high-dimensional situation where d is large; see, for example, Bai and Saranadasa (1996), Chen and Qin (2010) and Srivastava et al. (2013), among many others. It can be generalized to test the equality of multiple mean vectors in high dimensions.
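To make this embedding explicit, the following sketch builds the design matrix X and contrast matrix C that cast the two-sample comparison in the form (1.2)–(1.3); the group sizes and names are illustrative only.

```python
import numpy as np

def two_sample_design(n1, n2):
    """Design and contrast matrices encoding the two-sample mean test
    H0: mu1 = mu2 as the linear hypothesis CB = 0 in model (1.2)."""
    n = n1 + n2
    # Row i of X selects the group mean: B = (mu1, mu2)^T is p x d with p = 2.
    X = np.zeros((n, 2))
    X[:n1, 0] = 1.0
    X[n1:, 1] = 1.0
    C = np.array([[1.0, -1.0]])   # CB = mu1 - mu2
    return X, C

X2, C2 = two_sample_design(30, 40)
```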
Some notable work includes Schott (2007), Cai and Xia (2014), Hu et al. (2017), Li et al. (2017), Zhang et al. (2017) and Zhou et al. (2017). In most existing work, the random samples were assumed to be Gaussian or to follow some linear structure as in Bai and Saranadasa (1996). In contrast, the testing problem we are concerned with is much more general. For one thing, all the aforementioned high dimensional mean test problems can be fitted into our framework; beyond that, we can deal with the more general multivariate linear regression in the presence of an increasing number of predictor variables. For another, we do not assume Gaussianity or any particular structure of the error vectors {V_i}_{i=1}^n. Throughout the paper, we assume that p < n and the design matrix X is of full column rank such that
X^⊤X is invertible. The conventional MANOVA test statistic for (1.3) is given by

    Q_n = |PY|_F^2 = Σ_{i=1}^n Σ_{j=1}^n P_{ij} Y_i^⊤ Y_j,    (1.4)

where |·|_F stands for the Frobenius norm and

    P = X(X^⊤X)^{-1} C^⊤ {C(X^⊤X)^{-1} C^⊤}^{-1} C(X^⊤X)^{-1} X^⊤ = (P_{ij})_{n×n}

is the orthogonal projection matrix onto the column space of the matrix X(X^⊤X)^{-1}C^⊤. We shall reject the null hypothesis H_0 if Q_n is larger than some critical value.
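The projection matrix P and the statistic Q_n in (1.4) can be computed directly; a minimal numpy sketch, for conformable X (n × p), Y (n × d) and C (m × p), is given below with illustrative function names.

```python
import numpy as np

def manova_projection(X, C):
    """Projection matrix P onto the column space of X (X'X)^{-1} C'."""
    XtX_inv = np.linalg.inv(X.T @ X)
    M = X @ XtX_inv @ C.T                      # n x m
    middle = np.linalg.inv(C @ XtX_inv @ C.T)  # m x m
    return M @ middle @ M.T                    # n x n, symmetric and idempotent

def conventional_Qn(X, Y, C):
    """Conventional MANOVA statistic Q_n = |PY|_F^2 from (1.4)."""
    P = manova_projection(X, C)
    return np.sum((P @ Y) ** 2), P

# Example usage: Qn, P = conventional_Qn(X, Y, C).
# The quantity (1/m) * sum(diag(P)**2) appears in Assumption 2.1 below.
```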
In the univariate case where d = 1, the asymptotic behavior of Q_n has been extensively studied in the literature; see Götze and Tikhomirov (1999) and Götze and Tikhomirov (2002) for detailed discussions. The validity of performing a test for (1.3) using Q_n when d is large has been open for a long time. The first goal of the paper is to provide a solution to this open problem by rigorously establishing a distributional approximation of the traditional MANOVA test statistic when d is allowed to grow with n. Our key tool is the Gaussian approximation for degenerate U type statistics: under fairly mild moment conditions, quadratic functionals of non-Gaussian random vectors can be approximated by those of Gaussian vectors with the same covariance structure. It is worth mentioning that Chen (2018) established a Gaussian approximation result for high dimensional non-degenerate U statistics by Stein's method, which cannot be applied to the degenerate case here. From a technical point of view, we employ completely different arguments to bound the distance between the distribution functions of the test statistic and its Gaussian analogue.

The main contributions of this paper are three-fold. Firstly, we develop a systematic theory for the conventional MANOVA test statistic Q_n in the high dimensional setting. More specifically, we shall establish a dichotomy result: Q_n can be approximated either by a linear combination of independent chi-squared random variables or by a normal distribution under different conditions; see Theorem 2.1.
While this reveals interesting theoretical properties of the test statistic, it causes difficulties in applications, as one may not know which asymptotic distribution to use in practice. To overcome this difficulty, as the second main contribution of our paper, we propose using a new U type test statistic. With the modified test statistic, such a dichotomy does not appear; see Theorem 2.5 for the asymptotic result. Thirdly, we will propose a new estimator for the second spectral moment of the covariance matrix via a data-splitting technique. To the best of our knowledge, it is the first work concerning an unbiased and ratio consistent estimator in the multivariate linear regression model.

We now introduce some notation. Let I{·} denote the indicator function. For random variables X ∈ R and Y ∈ R, the Kolmogorov distance is defined by ρ(X, Y) = sup_{z∈R} |P(X ≤ z) − P(Y ≤ z)|. For q > 0, we write ∥X∥_q = (E|X|^q)^{1/q} if E|X|^q < ∞. For two matrices A = (a_{ij})_{i≤I, j≤J} and B = (b_{ij})_{i≤I, j≤J}, A ◦ B = (a_{ij} b_{ij})_{i≤I, j≤J} denotes their Hadamard product. For any positive integer m, we use I_m to denote the m × m identity matrix. For two sequences of positive numbers (a_n) and (b_n), we write a_n ≲ b_n if there exists some constant C such that a_n ≤ C b_n for all large n. We use C, C_1, C_2, ... to denote positive constants whose values may vary from place to place.

2 Theoretical results

We start with some notational definitions and basic assumptions. Let λ_1(Σ) ≥ ... ≥ λ_d(Σ) ≥ 0 denote the eigenvalues of Σ = cov(V_1) and let ς = |Σ|_F = {Σ_{k=1}^d λ_k^2(Σ)}^{1/2}. For q ≥ 2, we define

    M_q = E |V_1^⊤ V_2 / ς|^q   and   L_q = E |V_1^⊤ Σ V_1 / ς^2|^{q/2}.    (2.1)

Assumption 2.1. Recall that P_11, ..., P_nn are the diagonal elements of the matrix P. Assume that

    (1/m) Σ_{i=1}^n P_ii^2 → 0   as n → ∞.

Remark 1. Assumption 2.1 is quite natural and mild for testing (1.3). For instance, it automatically holds for the one sample test of a mean vector, as m^{-1} Σ_{i=1}^n P_ii^2 = 1/n.
Additionally, in the context of the K-sample test, as discussed in Section 3.1, Assumption 2.1 is satisfied as long as the minimum sample size goes to infinity. More generally, since Σ_{i=1}^n P_ii = m, a simple sufficient condition for Assumption 2.1 would be max_{1≤i≤n} P_ii → 0. Further discussions on this condition will be given in Remark 6 and Example 2.1.

2.1 Asymptotic distribution of the conventional MANOVA test statistics

Under the null hypothesis CB = 0, PXB = X(X^⊤X)^{-1}C^⊤{C(X^⊤X)^{-1}C^⊤}^{-1}CB = 0 and hence, under H_0, Q_n = |PXB + PV|_F^2 = |PV|_F^2, which can be further decomposed as

    Q_n = Σ_{i=1}^n Σ_{j=1}^n P_{ij} V_i^⊤ V_j = Σ_{i=1}^n P_ii V_i^⊤ V_i + Σ_{i=1}^n Σ_{j≠i} P_{ij} V_i^⊤ V_j =: D_n + Q_n^⋆.    (2.2)

Observe that D_n is a weighted sum of i.i.d. random variables and Q_n^⋆ is a second order degenerate U-statistic of high dimensional random vectors. These two terms can be differently distributed in the high dimensional setting. More specifically, since D_n and Q_n^⋆ are uncorrelated, we have var(Q_n) = var(D_n) + var(Q_n^⋆), where

    var(D_n) = Σ_{i=1}^n P_ii^2 ∥E_0(V_1^⊤ V_1)∥_2^2   and   var(Q_n^⋆) = 2 (m − Σ_{i=1}^n P_ii^2) ς^2,

with E_0(V_1^⊤ V_1) = V_1^⊤ V_1 − E(V_1^⊤ V_1).
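On simulated errors, the decomposition (2.2) is easy to verify numerically; the following helper (illustrative only) splits |PV|_F^2 into D_n and Q_n^⋆ for a given projection matrix P and error matrix V.

```python
import numpy as np

def decompose_Qn_H0(P, V):
    """Split Q_n = |PV|_F^2 into D_n + Q_n^* as in (2.2), for errors V (n x d)."""
    Dn = np.sum(np.diag(P) * np.einsum('ij,ij->i', V, V))   # sum_i P_ii * V_i' V_i
    Qn = np.sum((P @ V) ** 2)                                # |PV|_F^2 under H_0
    return Dn, Qn - Dn                                       # (D_n, Q_n^*)
```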
When the dimension d increases with the sample size n, the magnitudes of var(D_n) and var(Q_n^⋆) can be quite different for non-Gaussian {V_i}_{i=1}^n; cf. Example 4.1. As a consequence, Q_n can exhibit different asymptotic null distributions. More precisely, to asymptotically quantify the discrepancy between var(D_n) and var(Q_n^⋆), under Assumption 2.1 we define

    Λ^2 = Σ_{i=1}^n P_ii^2 ∥E_0(V_1^⊤ V_1)∥_2^2 / (m ς^2).

Before presenting the distributional theory for Q_n, we first define its Gaussian analogue. Let Z_1, ..., Z_n be i.i.d. N(0, Σ) Gaussian random vectors and write Z = (Z_1, ..., Z_n)^⊤. Then the Gaussian analogue of Q_n is defined as the same quadratic functional of {Z_i}_{i=1}^n,

    G_n = |PZ|_F^2 = Σ_{i=1}^n Σ_{j=1}^n P_{ij} Z_i^⊤ Z_j.    (2.3)
Theorem 2.1. Let q = 2 + δ, where 0 < δ ≤ 1. Suppose Assumption 2.1 holds and

    ∆_q = (Σ_{i=1}^n Σ_{j≠i} |P_{ij}|^q / m^{q/2}) M_q + (Σ_{i=1}^n P_ii^{q/2} / m^{q/2}) L_q → 0.    (2.4)

1. Assume Λ → 0. Then, under (2.4) and the null hypothesis, we have

    ρ(Q_n, G_n) ≤ C_1 Λ^{2/5} + C_q ∆_q^{1/(2q+1)} + C_2 ((1/m) Σ_{i=1}^n P_ii^2)^{1/5} → 0.

2. Assume Λ → ∞ and that the Lindeberg condition holds for W_i = E_0(P_ii V_i^⊤ V_i)/(Λ ς √m), that is, Σ_{i=1}^n E(W_i^2 I{|W_i| > ϵ}) → 0 for any ϵ > 0. Then, under the null hypothesis, we have

    (Q_n − m tr(Σ)) / (Λ ς √m) ⇒ N(0, 1).    (2.5)

Remark 2. Theorem 2.1 illustrates an interesting dichotomy: the conventional MANOVA test statistic Q_n can have one of two different asymptotic null distributions, depending on the magnitude of the unknown quantity Λ. This dichotomy poses extra difficulty for utilizing Q_n to test (1.3) in practical implementation, as we need to predetermine which asymptotic distribution to use.
Any subjective choice may lead to unreliable conclusions. To illustrate this, suppose now Λ → 0. For α ∈ (0, 1), let G_n^{-1}(α) denote the (1 − α)th quantile of G_n. Based on Theorem 2.1, an α level test for (1.3) is given by Φ_0 = I{Q_n > G_n^{-1}(α)}. However, if one implements Φ_0 under the case where Λ → ∞, then the type I error of Φ_0 satisfies P(Φ_0 = 1 | H_0) → 1/2, which implies that Φ_0 in this scenario (Λ → ∞) is no better than random guessing.

Remark 3. Recently much attention has been paid to studying the dichotomy and similar phase transition phenomena of the asymptotic distributions of classical tests in the high dimensional setting. For instance, Xu et al. (2019) studied the Pearson's chi-squared test in the scenario where the number of cells can increase with the sample size and demonstrated that the corresponding asymptotic distribution can be either chi-squared or normal. He et al. (2021) derived the phase transition boundaries of several standard likelihood ratio tests on multivariate mean and covariance structures of Gaussian random vectors. In addition to these tests, we suspect a similar phenomenon can occur for many other traditional tests as the dimension increases with the sample size. More importantly, as in our paper, investigating these phase transition phenomena of classical tests not only contributes to the theoretical development but also motivates us to propose new test procedures or more advanced distributional approximation theory suitable for the high dimensional scenario.
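For the regime Λ → 0, the quantile G_n^{-1}(α) can be approximated by Monte Carlo. The sketch below does so under the idealized assumption that Σ is known, which is for illustration only (in practice Σ would have to be estimated); all function and parameter names are illustrative.

```python
import numpy as np

def phi0_test(Qn, P, Sigma, alpha=0.05, n_mc=2000, seed=0):
    """Monte Carlo version of the test Phi_0 = I{Q_n > G_n^{-1}(alpha)}.

    G_n = |PZ|_F^2 with Z having i.i.d. N(0, Sigma) rows; assuming Sigma is
    known is an idealization used purely for illustration."""
    rng = np.random.default_rng(seed)
    n, d = P.shape[0], Sigma.shape[0]
    Gn_draws = np.empty(n_mc)
    for b in range(n_mc):
        Z = rng.multivariate_normal(np.zeros(d), Sigma, size=n)
        Gn_draws[b] = np.sum((P @ Z) ** 2)
    critical_value = np.quantile(Gn_draws, 1 - alpha)
    return Qn > critical_value, critical_value
```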
The following lemma establishes an upper bound for ∆_q.

Lemma 2.2. Assume that M_q < ∞. Then we have

    ∆_q < 2 ((1/m) max_{1≤i≤n} P_ii)^{δ/2} M_q.

Remark 4. Condition (2.4) can be viewed as a Lyapunov-type condition for the high dimensional Gaussian approximation of Q_n. It is quite natural and does not impose any explicit restriction on the relation between the dimension d and the sample size n directly. In particular, (2.4) can be dimension free for some commonly used models; namely, (2.4) holds for arbitrary dimension d ≥ 1 as long as n → ∞. For instance, suppose that {V_i}_{i=1}^n follow the linear process model

    V_i = A ξ_i   (i = 1, ..., n),    (2.6)

where A is a d × L matrix for some integer L ≥ 1, ξ_i = (ξ_{i1}, ..., ξ_{iL})^⊤ and {ξ_{iℓ}}_{i,ℓ∈N} are independent zero-mean random variables with uniformly bounded qth moment E|ξ_{iℓ}|^q ≤ C < ∞. Applying the Burkholder inequality leads to M_q ≤ (1 + δ)^q max_{1≤ℓ≤L} ∥ξ_{iℓ}∥_q^{2q}. Consequently, Lemma 2.2 reveals that a sufficient condition for ∆_q → 0 is

    (1/m) max_{1≤i≤n} P_ii → 0.    (2.7)

It is worth mentioning that (2.7) depends only on the projection matrix P and does not impose any restriction on the dimension d. Moreover, under Assumption 2.1, (2.7) is automatically satisfied in view of max_{1≤i≤n} (P_ii/m)^2 ≤ m^{-2} Σ_{i=1}^n P_ii^2 → 0.
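Errors following the linear process model (2.6) are straightforward to generate; the small illustration below uses arbitrary choices of d, L and innovation distribution, with illustrative names throughout.

```python
import numpy as np

rng = np.random.default_rng(1)
n, d, L = 100, 50, 60

# Loading matrix A (d x L) and independent standardized innovations with finite
# q-th moment, e.g. centered exponential variables.
A = rng.normal(size=(d, L)) / np.sqrt(L)
xi = rng.exponential(size=(n, L)) - 1.0      # mean zero, all moments finite
V = xi @ A.T                                 # rows V_i = A xi_i follow model (2.6)
Sigma = A @ A.T                              # implied error covariance cov(V_1)
```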
2.2 Modified U type test statistics

The dichotomous nature of the asymptotic null distribution makes Q_n unsuitable for testing (1.3) in the high dimensional setting. This motivates us to propose a modified U type version of Q_n for which such a dichotomy does not occur. To fix the idea, let B_0 ∈ R^{p×d} denote the coefficient matrix of model (1.2) under the null hypothesis, so that CB_0 = 0 and, under H_0, Y = XB_0 + V. Motivated by Theorem 2.1, a natural candidate for the modified test statistic would be

    Q_{n,0} = Q_n − Σ_{k=1}^n P_kk (Y_k − B_0^⊤ X_k)^⊤ (Y_k − B_0^⊤ X_k),    (2.8)

which coincides with Q_n^⋆ in
(2.2) under the null hypothesis. However, B_0 is unknown in practice and hence Q_{n,0} is infeasible. The primary goal of this section is to propose a consistent empirical approximation U_n of Q_{n,0}. In particular, motivated by the discussions in Section 2.1, the modified test statistic U_n should satisfy, under H_0,

    U_n = Σ_{i=1}^n Σ_{j≠i} K_{ij} V_i^⊤ V_j   and   (U_n − Q_{n,0}) / √var(Q_{n,0}) = o_P(1),

for some symmetric matrix K = (K_{ij})_{n×n}. The latter ensures that U_n is asymptotically equivalent to Q_{n,0} in (2.8). Towards this end, let B̂_0 be the least squares estimator of B under the constraint CB = 0. Then Y − X B̂_0 = (I_n − P_0) Y, where P_0 = X(X^⊤X)^{-1}X^⊤ − P is the projection matrix of model (1.2) under the null hypothesis. In view of (2.8), the modified U type test statistic is then defined by

    U_n = Q_n − Σ_{k=1}^n θ_k (Y_k − B̂_0^⊤ X_k)^⊤ (Y_k − B̂_0^⊤ X_k),

which, under H_0, equals

    Σ_{i=1}^n (P_ii − Σ_{k=1}^n θ_k ¯P_{ik,0}^2) V_i^⊤ V_i + Σ_{i=1}^n Σ_{j≠i} (P_{ij} − Σ_{k=1}^n θ_k ¯P_{ik,0} ¯P_{jk,0}) V_i^⊤ V_j = Σ_{i=1}^n Σ_{j≠i} (P_{ij} − Σ_{k=1}^n θ_k ¯P_{ik,0} ¯P_{jk,0}) V_i^⊤ V_j,    (2.9)

where ¯P_0 = I_n − P_0 = (¯P_{ij,0})_{n×n} and the last equality follows by taking θ_1, ..., θ_n to be the solutions of the following linear equations
    Σ_{k=1}^n ¯P_{ik,0}^2 θ_k = P_ii   (i = 1, ..., n).    (2.10)

It is worth mentioning that the θ_k in (2.9) are typically not P_kk, as one would naturally like to use in view of (2.8). We can view (2.10) as a detailed balance condition, as it removes the diagonal terms in (2.9). Denote θ = (θ_1, ..., θ_n)^⊤ and rewrite (2.10) in the more compact matrix form

    (¯P_0 ◦ ¯P_0) θ = (P_11, ..., P_nn)^⊤.    (2.11)

Let P_θ = P − ¯P_0 D_θ ¯P_0 = (P_{ij,θ})_{n×n}, where D_θ = diag(θ_1, ..., θ_n) is a diagonal matrix.
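Computationally, θ is the solution of the n × n linear system (2.11), after which P_θ and U_n follow directly. The numpy sketch below is illustrative only; it assumes conformable X (n × p), Y (n × d) and C (m × p), and the helper name is hypothetical.

```python
import numpy as np

def modified_Un(X, Y, C):
    """Modified U type statistic U_n from (2.9), with theta solving (2.11)."""
    n = X.shape[0]
    XtX_inv = np.linalg.inv(X.T @ X)
    M = X @ XtX_inv @ C.T
    P = M @ np.linalg.inv(C @ XtX_inv @ C.T) @ M.T   # projection P from (1.4)
    P0 = X @ XtX_inv @ X.T - P                       # null-model projection P_0
    Pbar0 = np.eye(n) - P0                           # bar P_0 = I_n - P_0

    # Equation (2.11): (bar P_0 o bar P_0) theta = (P_11, ..., P_nn)^T.
    theta = np.linalg.solve(Pbar0 * Pbar0, np.diag(P))

    # P_theta = P - bar P_0 D_theta bar P_0 has zero diagonal by (2.10).
    P_theta = P - Pbar0 @ np.diag(theta) @ Pbar0

    R0 = Pbar0 @ Y                                   # null residuals Y - X * B0_hat
    Qn = np.sum((P @ Y) ** 2)                        # conventional statistic (1.4)
    Un = Qn - np.sum(theta * np.sum(R0 ** 2, axis=1))
    return Un, theta, P_theta
```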
Then $P_{ii,\theta} = 0$ for all $i = 1, \ldots, n$ in view of (2.11), and
\[
U_n \overset{H_0}{=} \operatorname{tr}(V^\top P_\theta V) = \sum_{i=1}^n \sum_{j \neq i} P_{ij,\theta}\, V_i^\top V_j.
\]
Before proceeding, we first introduce a sufficient condition under which $U_n$ exists and is well defined.

Lemma 2.3. Assume that there exists a positive constant $\varpi_0 < 1/2$ such that
\[
\max_{1 \le i \le n} P_{ii,0} \le \varpi_0. \tag{2.12}
\]
Then the matrix $\bar P_0 \circ \bar P_0$ is strictly diagonally dominant and $|P_\theta|_F^2 = m - \sum_{i=1}^n \theta_i P_{ii}$. Moreover, if $\max_{1 \le i \le n} P_{ii} \le \varpi_1 \zeta$ for some positive constant $\varpi_1 < 1/2$, where $\zeta = (1 - 2\varpi_0)(1 - \varpi_0)$, then we have $\max_{1 \le i \le n} |\theta_i| \le \varpi_1 < 1/2$.

Remark 5. Condition (2.12) ensures that the matrix $\bar P_0 \circ \bar P_0$ is invertible. Consequently the solution $\theta$ of (2.11) exists and is unique. It is worth noting that $\theta$ is independent of the dimension $d$ and depends only on the projection matrices $P$ and $P_0$. Moreover, as shown in the proof of Lemma 2.3,
\[
\sum_{i=1}^n \theta_i P_{ii} \le \frac{1}{\zeta} \sum_{i=1}^n P_{ii}^2
\quad\text{and}\quad
\max_{1 \le i \le n} |\theta_i| \le \frac{1}{\zeta} \max_{1 \le i \le n} P_{ii},
\]
which are essential to upper bound the quantity $\Delta_{q,\theta}$ in Lemma 2.6 below.
Consequently, under Assumption 2.1, if $\sum_{i=1}^n P_{ii}^2 \le m\zeta/2$ for sufficiently large $n$, we obtain
\[
\operatorname{var}(U_n) = 2|P_\theta|_F^2 \varsigma^2 = 2\Big( m - \sum_{i=1}^n \theta_i P_{ii} \Big)\varsigma^2 > m\varsigma^2,
\]
which ensures that the proposed test statistic $U_n$ is non-degenerate and well defined.

Remark 6. Since $\operatorname{col}(X(X^\top X)^{-1}C^\top) \subset \operatorname{col}(X)$, where $\operatorname{col}(\cdot)$ denotes the column space, $P_0 = X(X^\top X)^{-1}X^\top - P$ defined above is also a projection matrix. Hence $\max\{P_{ii}, P_{ii,0}\} \le X_i^\top(X^\top X)^{-1}X_i$ uniformly for $i \in \{1, \ldots, n\}$, and a sufficient condition for Lemma 2.3 would be
\[
\max_{1 \le i \le n} X_i^\top (X^\top X)^{-1} X_i \le \min\{\varpi_0,\ (1 - 2\varpi_0)(1 - \varpi_0)\varpi_1\}, \tag{2.13}
\]
which is fairly mild on the design matrix $X$. More specifically, it is commonly assumed (Huber, 1973; Portnoy, 1985; Wu, 1986; Shao and Wu, 1987; Shao, 1988; Mammen, 1989; Navidi, 1989; Lahiri, 1992) for the linear regression model that $\max_{1 \le i \le n} X_i^\top(X^\top X)^{-1}X_i \to 0$, which ensures a kind of “robustness of design” (Huber, 1973). It also implies Assumption 2.1 in view of Remark 1 and can be viewed as an imbalance measure of model (1.2) (Shao and Wu, 1987).
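As a quick numerical check of condition (2.13), one can compute the maximum leverage score of the design. A minimal sketch follows (our own helper, with purely illustrative constants $\varpi_0 = \varpi_1 = 1/4$; not code from the paper); it assumes $X$ has full column rank.

```python
import numpy as np

def max_leverage(X):
    """Maximum leverage score max_i X_i^T (X^T X)^{-1} X_i of the n x p design X."""
    # reduced QR is numerically safer than forming (X^T X)^{-1} explicitly
    Q, _ = np.linalg.qr(X, mode='reduced')
    return float(np.max(np.sum(Q * Q, axis=1)))

def check_condition_2_13(X, varpi0=0.25, varpi1=0.25):
    """Check the sufficient condition (2.13) for Lemma 2.3 (constants are illustrative)."""
    bound = min(varpi0, (1.0 - 2.0 * varpi0) * (1.0 - varpi0) * varpi1)
    return max_leverage(X) <= bound
```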
Example 2.1. Suppose $X_1, \ldots, X_n$ are independent Gaussian random vectors $N(0, \Gamma)$, where the covariance matrix $\Gamma \in \mathbb{R}^{p \times p}$ has minimal eigenvalue $\lambda_{\min}(\Gamma) > 0$. Then, with probability at least $1 - 2\exp(-n/2) - n^{-1}$, we have
\[
\max_{1 \le i \le n} X_i^\top (X^\top X)^{-1} X_i \le \frac{9p + 18\sqrt{2p\log n} + 36\log n}{n}. \tag{2.14}
\]
Consequently, condition (2.13) holds with high probability as long as $p/n$ is sufficiently small.

Proposition 2.4. Under the conditions of Lemma 2.3, we have $\mathrm E(U_n) \ge 0$. In particular, $\mathrm E(U_n) = 0$ if and only if $CB = 0$.

2.3 Asymptotic distribution of the modified test statistics

The primary goal of this section is to establish a Gaussian approximation for the modified test statistic $U_n$. Following (2.3), the Gaussian analogue of $U_n$ is defined by
\[
G_n = \operatorname{tr}(Z^\top P_\theta Z) = \sum_{i=1}^n \sum_{j \neq i} P_{ij,\theta}\, Z_i^\top Z_j.
\]
The following theorem establishes a non-asymptotic upper bound on the Kolmogorov distance between the distribution functions of $U_n$ and its Gaussian analogue $G_n$.
Compared with Theorem 2.1, it reveals that the modification of the test statistic $Q_n$ in (2.9) removes the dichotomous nature of its asymptotic null distribution.

Theorem 2.5. Let $q = 2 + \delta$, where $0 < \delta \le 1$. Assume that (2.12) holds and that
\[
\Delta_{q,\theta} = \frac{\sum_{i=1}^n \sum_{j \neq i} |P_{ij,\theta}|^q}{m^{q/2}} M_q + \frac{\sum_{i=1}^n \big( \sum_{j \neq i} P_{ij,\theta}^2 \big)^{q/2}}{m^{q/2}} L_q \to 0.
\]
Then, under Assumption 2.1 and the null hypothesis, we have
\[
\rho(U_n, G_n) \le C_q \Delta_{q,\theta}^{1/(2q+1)} + C\Big( \frac{1}{m} \sum_{i=1}^n P_{ii}^2 \Big)^{1/5} \to 0.
\]
Similar to Lemma 2.2, we establish an analogous upper bound for $\Delta_{q,\theta}$ in the following lemma.

Lemma 2.6. Under condition (2.12), we have
\[
\Delta_{q,\theta} \lesssim \Big( \frac{1}{m} \max_{1 \le i \le n} P_{ii} \Big)^{\delta/2} M_q.
\]
For $\alpha \in (0, 1)$, Proposition 2.4 and Theorem 2.5 motivate an $\alpha$ level test for (1.3) as follows,
\[
\Phi_\theta = I\bigg\{ \frac{U_n}{\varsigma |P_\theta|_F \sqrt{2}} > c_{1-\alpha} \bigg\}, \tag{2.15}
\]
where $c_{1-\alpha}$ is the $(1-\alpha)$th quantile of the standardized $G_n/\sqrt{\operatorname{var}(G_n)}$.
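In practice the critical value $c_{1-\alpha}$ in (2.15) has to be approximated. One simple plug-in option (a sketch of ours, not necessarily the paper's multiplier bootstrap calibration mentioned later) is to simulate the Gaussian analogue $G_n = \operatorname{tr}(Z^\top P_\theta Z)$ with an estimated covariance $\widehat\Sigma$ and take the empirical quantile of the standardized draws.

```python
import numpy as np

def mc_critical_value(P_theta, Sigma_hat, alpha=0.05, n_rep=2000, seed=0):
    """Monte Carlo (1-alpha) quantile of the standardized Gaussian analogue
    G_n / sqrt(var(G_n)), with G_n = tr(Z^T P_theta Z) and rows Z_i ~ N(0, Sigma_hat)."""
    rng = np.random.default_rng(seed)
    n, d = P_theta.shape[0], Sigma_hat.shape[0]
    L = np.linalg.cholesky(Sigma_hat + 1e-10 * np.eye(d))          # small jitter for safety
    var_Gn = 2.0 * np.sum(P_theta ** 2) * np.sum(Sigma_hat ** 2)   # 2 |P_theta|_F^2 |Sigma|_F^2
    draws = np.empty(n_rep)
    for b in range(n_rep):
        Z = rng.standard_normal((n, d)) @ L.T                      # rows Z_i ~ N(0, Sigma_hat)
        draws[b] = np.sum(Z * (P_theta @ Z))                       # tr(Z^T P_theta Z)
    return float(np.quantile(draws / np.sqrt(var_Gn), 1.0 - alpha))
```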
Remark 7. It is worth mentioning that the approximating distribution $G_n$ may or may not be asymptotically normal. Let $\lambda_1(P_\theta), \ldots, \lambda_n(P_\theta)$ denote the eigenvalues of the symmetric matrix $P_\theta$. Being a quadratic functional of the Gaussian random vectors $\{Z_i\}_{i=1}^n$, $G_n$ is distributed as a linear combination of independent chi-squared random variables,
\[
G_n \overset{\mathcal D}{=} \sum_{k=1}^d \sum_{i=1}^n \lambda_k(\Sigma)\lambda_i(P_\theta)\,\eta_{ik}(1)
= \sum_{k=1}^d \sum_{i=1}^n \lambda_k(\Sigma)\lambda_i(P_\theta)\,\{\eta_{ik}(1) - 1\},
\]
where $\{\eta_{ik}(1)\}_{i,k \in \mathbb N}$ are independent $\chi^2_1$ random variables and the last equality follows from the fact that $\sum_{i=1}^n \lambda_i(P_\theta) = \sum_{i=1}^n P_{ii,\theta} = 0$. More specifically, the Lindeberg–Feller central limit theorem and Lemma 2.3 imply that $G_n/\sqrt{\operatorname{var}(G_n)} \Rightarrow N(0, 1)$ if and only if
\[
\frac{\lambda_1(\Sigma)}{\varsigma\sqrt{m}} \to 0. \tag{2.16}
\]
Consequently, $c_{1-\alpha}$ in (2.15) is asymptotically equal to the standard normal quantile whenever (2.16) holds. When $m \to \infty$, condition (2.16) automatically holds for arbitrary dimension $d \ge 1$ since $\lambda_1(\Sigma) \le \varsigma$. Otherwise, (2.16) is equivalent to $\operatorname{tr}(\Sigma^4)/\varsigma^4 \to 0$, which is a common assumption to ensure the asymptotic normality of high dimensional quadratic statistics; see, for example, Bai and Saranadasa (1996), Chen and Qin (2010), Cai and Ma (2013), Yao et al. (2018) and Zhang et al. (2018), among others. In particular, it reveals that the asymptotic null distribution of $U_n$ can be non-normal if (2.16) is violated. For example, let $Y_1, \ldots, Y_n \in \mathbb{R}^d$ be i.i.d. random vectors with mean vector $\mu_Y = \mathrm E(Y_1)$ and consider testing whether $\mu_Y = 0$. Assume that $\Sigma = \operatorname{cov}(Y_1) = (\Sigma_{jk})_{d \times d}$ has entries $\Sigma_{jk} = \vartheta + (1 - \vartheta)I\{j = k\}$ for some constant $\vartheta \in (0, 1)$. Then $\lambda_1(\Sigma)/(\varsigma\sqrt{m}) \to 1$ and it follows from Theorem 2.5 that
\[
\frac{U_n}{\sqrt{\operatorname{var}(U_n)}} = \frac{\sum_{i=1}^n \sum_{j \neq i} Y_i^\top Y_j}{\varsigma\sqrt{2n(n-1)}} \Rightarrow \frac{\chi^2_1 - 1}{\sqrt{2}}.
\]
The simulation study in Section 5 shows that our Gaussian multiplier bootstrap approach has satisfactory performance regardless of whether $U_n$ is asymptotically normal or not.
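To visualize the non-normal limit in the equicorrelated example above, the following simulation sketch (our own, purely illustrative) draws $Y_i \sim N(0, \Sigma)$ with $\Sigma_{jk} = \vartheta + (1-\vartheta)I\{j=k\}$ and computes the standardized statistic; a histogram of the output is close to $(\chi^2_1 - 1)/\sqrt{2}$ rather than $N(0, 1)$.

```python
import numpy as np

def equicorrelated_demo(n=50, d=300, vartheta=0.5, n_rep=1000, seed=1):
    """Simulate sum_{i != j} Y_i^T Y_j / (varsigma * sqrt(2 n (n-1)))
    with Y_i ~ N(0, Sigma), Sigma_jk = vartheta + (1 - vartheta) 1{j = k}."""
    rng = np.random.default_rng(seed)
    varsigma = np.sqrt(d + d * (d - 1) * vartheta ** 2)     # |Sigma|_F
    stats = np.empty(n_rep)
    for b in range(n_rep):
        # Y_i = sqrt(vartheta) w_i 1_d + sqrt(1 - vartheta) e_i has the equicorrelated Sigma
        w = rng.standard_normal((n, 1))
        e = rng.standard_normal((n, d))
        Y = np.sqrt(vartheta) * w + np.sqrt(1.0 - vartheta) * e
        s = Y.sum(axis=0)
        off_diag = s @ s - np.einsum('ij,ij->', Y, Y)       # sum_{i != j} Y_i^T Y_j
        stats[b] = off_diag / (varsigma * np.sqrt(2.0 * n * (n - 1)))
    return stats
```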
3 Applications

As mentioned in the introduction, our paradigm (1.3) is fairly general and can be applied to many commonly studied hypothesis testing problems. In this section, we consider two specific examples to illustrate the usefulness of the proposed U type test statistic and the corresponding asymptotic distribution theory.

3.1 High dimensional one-way MANOVA

Let $\{Y_{ij}\}_{j=1}^{n_i}$, $i = 1, \ldots, K$, be $K \ge 2$ independent samples following the model
\[
Y_{ij} = \mu_i + V_{ij} \qquad (j = 1, \ldots, n_i;\ i = 1, \ldots, K),
\]
where $\mu_1, \ldots, \mu_K \in \mathbb{R}^d$ are unknown mean vectors of interest and $\{V_{ij}\}_{j \in \mathbb N}$ are i.i.d. $d$-dimensional random vectors with $\mathrm E(V_{i1}) = 0$ and $\operatorname{cov}(V_{i1}) = \Sigma$. We are interested in testing the equality of the $K$ mean vectors, namely, testing the hypotheses
\[
H_0: \mu_1 = \cdots = \mu_K \quad\text{versus}\quad H_1: \mu_i \neq \mu_l \ \text{for some } 1 \le i \neq l \le K.
\]
Following the construction of (2.9), we propose the U type test statistic
\[
U_{nK} = \sum_{i=1}^K P_{ii,K} \sum_{j=1}^{n_i} \sum_{k \neq j} Y_{ij}^\top Y_{ik}
+ \sum_{i=1}^K \sum_{l \neq i} P_{il,K} \sum_{j=1}^{n_i} \sum_{k=1}^{n_l} Y_{ij}^\top Y_{lk}, \tag{3.1}
\]
where $n = \sum_{i=1}^K n_i$ is the total sample size,
\[
P_{ii,K} = \frac{1}{n-2}\Big( \frac{n}{n_i} - \frac{n + K - 2}{n - 1} \Big)
\quad\text{and}\quad
P_{il,K} = \frac{1}{n-2}\Big( \frac{1}{n_i} + \frac{1}{n_l} - \frac{n + K - 2}{n - 1} \Big).
\]
In the context of the two sample test for mean vectors, where $K = 2$, $U_{nK}$ in (3.1) reduces to
\[
U_{nK} = \frac{\sum_{i=1}^{n_1} \sum_{j \neq i} \sum_{k=1}^{n_2} \sum_{l \neq k} (Y_{1i} - Y_{2k})^\top (Y_{1j} - Y_{2l})}{(n-1)(n-2)n_1 n_2/n},
\]
which coincides with the commonly used U type test statistic (Chen and Qin, 2010). For each $i \in \{1, \ldots, K\}$, let $\{Z_{ij}\}_{j \in \mathbb N}$ be i.i.d. centered Gaussian random vectors with covariance matrix $\operatorname{cov}(Z_{ij}) = \Sigma$. Following (2.3), the Gaussian analogue of $U_{nK}$ is defined by
\[
G_{nK} = \sum_{i=1}^K P_{ii,K} \sum_{j=1}^{n_i} \sum_{k \neq j} Z_{ij}^\top Z_{ik}
+ \sum_{i=1}^K \sum_{l \neq i} P_{il,K} \sum_{j=1}^{n_i} \sum_{k=1}^{n_l} Z_{ij}^\top Z_{lk}.
\]
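A direct implementation of (3.1) only needs the group sizes and per-group sums; a sketch follows (the list-of-arrays input and the function name are our own conventions, not the paper's).

```python
import numpy as np

def U_nK(samples):
    """U type one-way MANOVA statistic (3.1); `samples` is a list of K arrays,
    the i-th of shape (n_i, d)."""
    K = len(samples)
    n_sizes = np.array([Y.shape[0] for Y in samples], dtype=float)
    n = n_sizes.sum()
    c = (n + K - 2.0) / (n - 1.0)
    sums = [Y.sum(axis=0) for Y in samples]             # per-group column sums
    sq = [np.einsum('ij,ij->', Y, Y) for Y in samples]  # sum_j |Y_ij|^2
    stat = 0.0
    for i in range(K):
        P_ii = (n / n_sizes[i] - c) / (n - 2.0)
        # sum_{j != k} Y_ij^T Y_ik = |sum_j Y_ij|^2 - sum_j |Y_ij|^2
        stat += P_ii * (sums[i] @ sums[i] - sq[i])
        for l in range(K):
            if l == i:
                continue
            P_il = (1.0 / n_sizes[i] + 1.0 / n_sizes[l] - c) / (n - 2.0)
            stat += P_il * (sums[i] @ sums[l])
    return stat
```

The identity $\sum_{j}\sum_{k \neq j} Y_{ij}^\top Y_{ik} = |\sum_j Y_{ij}|^2 - \sum_j |Y_{ij}|^2$ avoids the explicit double loop within each group.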
Let $n_{\min} = \min_{1 \le l \le K} n_l$. Since $\max_{1 \le i \le n} P_{ii} \le n_{\min}^{-1}$, Assumption 2.1 holds as long as $n_{\min} \to \infty$. The following proposition establishes a non-asymptotic upper bound on the Kolmogorov distance between the distribution functions of $U_{nK}$ and $G_{nK}$.

Proposition 3.1. Let $q = 2 + \delta$ for some $0 < \delta \le 1$. Assume that $n_{\min} \to \infty$ and
\[
\widetilde M_q = \max_{1 \le l, l' \le K} \mathrm E \bigg| \frac{V_{l1}^\top V_{l'2}}{\varsigma} \bigg|^q < \infty,
\]
where $\varsigma = |\Sigma|_F$. Then, under the null hypothesis, we have
\[
\rho(U_{nK}, G_{nK}) \le C_q \big( \widetilde M_q\, n_{\min}^{-\delta/2} \big)^{1/(2q+1)} \to 0.
\]
Remark 8. It is worth mentioning that both the dimension $d$ and the number of groups $K$ can grow with the total sample size $n$. In particular, as discussed in Remark 4, if all the $K$ samples follow the linear process model in (2.6), then $\rho(U_{nK}, G_{nK}) \to 0$ as long as $n_{\min} \to \infty$.

3.2 High dimensional nonparametric one-way MANOVA

For each $i \in \{1, \ldots, K\}$, let $F_i$ denote the distribution function of $Y_{i1}$. We consider testing whether these $K$ independent samples are equally distributed, namely, testing the hypotheses
\[
H_0: F_1 = \cdots = F_K \quad\text{versus}\quad H_1: F_i \neq F_l \ \text{for some } 1 \le i \neq l \le K. \tag{3.2}
\]
Being fundamental and important in statistical inference, (3.2) has been extensively studied; see, for example, Kruskal and Wallis (1952), Akritas and Arnold (1994), Brunner and Puri (2001), Rizzo and Székely (2010) and Thas (2010), among many others. However, all the aforementioned works mainly focus on the traditional low dimensional scenario, and testing (3.2) for high dimensional random vectors has been much less studied. In this section, we propose a new U type test statistic for (3.2) following the intuition of (2.9) and establish the corresponding distributional theory. In particular, our asymptotic framework is fairly general and allows both the dimension $d$ and the number of groups $K$ to grow with $n$.

To begin with, for each $i \in \{1, \ldots, K\}$, let $\phi_i(t) = \mathrm E\{\exp(\imath t^\top Y_{ij})\}$ denote the characteristic function of $Y_{ij}$, where $\imath$ stands for the imaginary unit. Then it is equivalent to test the hypotheses
\[
H_0: \phi_1 = \cdots = \phi_K \quad\text{versus}\quad H_1: \phi_i \neq \phi_l \ \text{for some } 1 \le i \neq l \le K. \tag{3.3}
\]
Denote $Y_{ij}(t) = \exp(\imath t^\top Y_{ij})$.
Similar to (3.1), our test statistic for (3.3) is defined by
\[
\widetilde U_{nK} = \sum_{i=1}^K P_{ii,K} \sum_{j=1}^{n_i} \sum_{k \neq j} \int Y_{ij}(t)\, Y_{ik}(t)\, w(t)\, dt
+ \sum_{i=1}^K \sum_{l \neq i} P_{il,K} \sum_{j=1}^{n_i} \sum_{k=1}^{n_l} \int Y_{ij}(t)\, Y_{lk}(t)\, w(t)\, dt,
\]
where $w(t) \ge 0$ is a suitable weight function such that the integrals above are well defined. Discussions of some commonly used weight functions are given in Remark 9 below.

Before proceeding, we first define the Gaussian analogue of $\widetilde U_{nK}$ under the null hypothesis that the $K$ samples are equally distributed. Define the covariance function of $Y_{11}(t)$ as
\[
\Sigma(t, s) = \mathrm E\{Y_{11}(t) - \phi_1(t)\}\overline{\{Y_{11}(s) - \phi_1(s)\}} = \phi_1(t - s) - \phi_1(t)\phi_1(-s) \qquad (t, s \in \mathbb{R}^d).
\]
Throughout this section, by Mercer's theorem, we assume that the covariance function above admits the eigendecomposition
\[
\Sigma(t, s) = \sum_{m=1}^\infty \lambda_m \varphi_m(t)\varphi_m(s) \qquad (t, s \in \mathbb{R}^d),
\]
where $\lambda_1 \ge \lambda_2 \ge \cdots \ge 0$ are the eigenvalues and $\varphi_1, \varphi_2, \ldots$ are the corresponding eigenfunctions. We now apply the Karhunen–Loève theorem. Let $\{Z_{ijk}\}_{i,j,k \in \mathbb N}$ be independent standard normal random variables and define the Gaussian processes
\[
Z_{ij}(t) = \sum_{m=1}^\infty \sqrt{\lambda_m}\, Z_{ijm}\, \varphi_m(t) \qquad (t \in \mathbb{R}^d).
\]
Then, following (2.3), the Gaussian analogue of $\widetilde U_{nK}$ is defined by
\[
\widetilde G_{nK} = \sum_{i=1}^K P_{ii,K} \sum_{j=1}^{n_i} \sum_{k \neq j} \int Z_{ij}(t)\, Z_{ik}(t)\, w(t)\, dt
+ \sum_{i=1}^K \sum_{l \neq i} P_{il,K} \sum_{j=1}^{n_i} \sum_{k=1}^{n_l} \int Z_{ij}(t)\, Z_{lk}(t)\, w(t)\, dt.
\]
Proposition 3.2. Let $q = 2 + \delta$ for some $0 < \delta \le 1$. Assume that $n_{\min} \to \infty$ and
\[
\widetilde M_q = \mathrm E \bigg| \frac{1}{F}\int_{\mathbb{R}^d} \mathrm E_0\{Y_{11}(t)\}\, \mathrm E_0\{Y_{12}(t)\}\, w(t)\, dt \bigg|^q < \infty,
\quad\text{where}\quad
F^2 = \sum_{m=1}^\infty \lambda_m^2.
\]
Then, under the null hypothesis that these $K$ independent samples are equally distributed, we have
\[
\rho(\widetilde U_{nK}, \widetilde G_{nK}) \le C_q \big( \widetilde M_q\, n_{\min}^{-\delta/2} \big)^{1/(2q+1)} \to 0.
\]
Remark 9. It is worth mentioning that the proposed test statistic $\widetilde U_{nK}$ involves a high dimensional integral over $t \in \mathbb{R}^d$, which can be computationally intractable in practice. To make $\widetilde U_{nK}$ well defined and to facilitate its computation, we shall choose a suitable weight function $w(t)$ such that $\widetilde U_{nK}$ has a simple closed-form expression. In the literature, various kinds of weight functions have been proposed, such as the Gaussian kernel function (Gretton et al., 2012), the Laplace kernel function (Gretton et al., 2012) and the energy kernel function (Székely et al., 2007; Rizzo and Székely, 2010). For instance, let $w(t)$ denote the density function of the random vector $X\kappa/\sqrt{\eta}$ for some $\kappa > 0$, where $X \sim N(0, I_d)$ and $\eta \sim \chi^2_1$ are independent (equivalently, $X\kappa/\sqrt{\eta}$ is a Cauchy random vector with location parameter $0$ and scale parameter $\kappa$). Then it is straightforward to verify that
\[
\int Y_{ij}(t)\, Y_{lk}(t)\, w(t)\, dt = \int \cos\{t^\top (Y_{ij} - Y_{lk})\}\, w(t)\, dt = \exp(-\kappa |Y_{ij} - Y_{lk}|),
\]
which is the same as the Laplace kernel function with $1/\kappa$ being its bandwidth, where $|\cdot|$ stands for the Euclidean distance.
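The displayed identity can also be checked numerically by drawing $t = \kappa X/\sqrt{\eta}$ directly; a small Monte Carlo sanity check (ours, not code from the paper) is sketched below.

```python
import numpy as np

def mc_check_laplace_identity(delta, kappa=1.0, n_mc=200_000, seed=2):
    """Monte Carlo estimate of E[cos(t^T delta)] with t = kappa * X / sqrt(eta),
    X ~ N(0, I_d), eta ~ chi^2_1; should be close to exp(-kappa * |delta|)."""
    rng = np.random.default_rng(seed)
    d = delta.shape[0]
    X = rng.standard_normal((n_mc, d))
    eta = rng.chisquare(1, size=n_mc)
    t = kappa * X / np.sqrt(eta)[:, None]
    estimate = float(np.mean(np.cos(t @ delta)))
    return estimate, float(np.exp(-kappa * np.linalg.norm(delta)))
```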
A more general result can be derived using Bochner's theorem; see, e.g., Theorem 3.1 of Gretton et al. (2009). Consequently, the proposed test statistic $\widetilde U_{nK}$ reduces to
\[
\widetilde U_{nK} = \sum_{i=1}^K P_{ii,K} \sum_{j=1}^{n_i} \sum_{k \neq j} \exp(-\kappa |Y_{ij} - Y_{ik}|)
+ \sum_{i=1}^K \sum_{l \neq i} P_{il,K} \sum_{j=1}^{n_i} \sum_{k=1}^{n_l} \exp(-\kappa |Y_{ij} - Y_{lk}|),
\]
which is fairly convenient to compute in practice. Moreover, a suitable choice of the weight function $w(t)$ also facilitates the analysis of the quantities $\widetilde M_q$ and $F$.
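With the Laplace-kernel closed form, $\widetilde U_{nK}$ is computable from pairwise Euclidean distances alone. The sketch below (our own helper names, reusing the weights $P_{ii,K}$ and $P_{il,K}$ from (3.1)) illustrates this.

```python
import numpy as np

def laplace_gram(A, B, kappa):
    """Gram matrix exp(-kappa * |a - b|) for rows of A (m x d) and B (r x d)."""
    sq = (np.einsum('ij,ij->i', A, A)[:, None]
          + np.einsum('ij,ij->i', B, B)[None, :]
          - 2.0 * A @ B.T)
    return np.exp(-kappa * np.sqrt(np.maximum(sq, 0.0)))

def U_tilde_nK(samples, kappa=1.0):
    """Kernelized statistic of Remark 9 with the Laplace kernel; `samples` is a
    list of K arrays, the i-th of shape (n_i, d)."""
    K = len(samples)
    n_sizes = np.array([Y.shape[0] for Y in samples], dtype=float)
    n = n_sizes.sum()
    c = (n + K - 2.0) / (n - 1.0)
    stat = 0.0
    for i in range(K):
        G_ii = laplace_gram(samples[i], samples[i], kappa)
        P_ii = (n / n_sizes[i] - c) / (n - 2.0)
        stat += P_ii * (G_ii.sum() - np.trace(G_ii))   # keep only the j != k terms
        for l in range(K):
            if l == i:
                continue
            P_il = (1.0 / n_sizes[i] + 1.0 / n_sizes[l] - c) / (n - 2.0)
            stat += P_il * laplace_gram(samples[i], samples[l], kappa).sum()
    return stat
```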
4 Practical implementation

In this section, we propose an unbiased estimator of $\varsigma^2$ which is ratio-consistent under fairly mild moment conditions. To begin with, since $\mathrm E(V_i^\top V_j)^2 = \varsigma^2$ for any $i \neq j$, a natural unbiased U type estimator of $\varsigma^2$ based on $\{V_i\}_{i=1}^n$ would be
\[
\widehat\varsigma^2_o = \frac{1}{n(n-1)} \sum_{i=1}^n \sum_{j \neq i} (V_i^\top V_j)^2. \tag{4.1}
\]
Let $\bar P_1 = I_n - X(X^\top X)^{-1}X^\top = (\bar P_{ij,1})_{n \times n}$ and $\widehat V = \bar P_1 Y = (\widehat V_1, \ldots, \widehat V_n)^\top$. It is worth noting that directly substituting the residual vectors $\{\widehat V_i\}_{i=1}^n$ into (4.1) yields a feasible but generally biased estimator of $\varsigma^2$. More specifically, for any $i \neq j$,
\[
\mathrm E(\widehat V_i^\top \widehat V_j)^2
= (\bar P_{ii,1}\bar P_{jj,1} + \bar P_{ij,1}^2)\varsigma^2
+ \bar P_{ij,1}^2\, \mathrm E(V_1^\top V_1)(V_2^\top V_2)
+ \sum_{k=1}^n (\bar P_{ik,1}\bar P_{jk,1})^2 \big\{ \|\mathrm E_0(V_1^\top V_1)\|_2^2 - 2\varsigma^2 \big\},
\]
which reveals that $(\widehat V_i^\top \widehat V_j)^2$ is no longer unbiased for $\varsigma^2$ even after proper scaling. This motivates us to propose a new unbiased estimator of $\varsigma^2$ via data-splitting, which excludes the bias terms $(V_i^\top V_i)^2$ and $(V_i^\top V_i)(V_j^\top V_j)$. Without loss of generality, we assume that the sample size $n$ is even in what follows.

1. Randomly split $\{1, \ldots, n\}$ into two halves $A$ and $A^c$. Denote $\mathcal M_A = \{(X_i, Y_i),\ i \in A\}$ and $\mathcal M_{A^c} = \{(X_i, Y_i),\ i \in A^c\}$.

2. For both $\mathcal M_A$ and $\mathcal M_{A^c}$, fit model (1.1) with the least squares estimates and compute
\[
\widehat\Sigma_A = \frac{1}{n/2 - p} \widehat V_A^\top \widehat V_A
\quad\text{and}\quad
\widehat\Sigma_{A^c} = \frac{1}{n/2 - p} \widehat V_{A^c}^\top \widehat V_{A^c},
\]
where $\widehat V_A$ and $\widehat V_{A^c}$ are the residual matrices of $\mathcal M_A$ and $\mathcal M_{A^c}$, respectively.

3. Compute the estimator $\widehat\varsigma^2_A = \operatorname{tr}(\widehat\Sigma_A \widehat\Sigma_{A^c})$.

Since $\widehat\Sigma_A$ and $\widehat\Sigma_{A^c}$ are independent and both are unbiased estimators of $\Sigma$, $\widehat\varsigma^2_A$ is unbiased for $\varsigma^2$, as
\[
\mathrm E(\widehat\varsigma^2_A) = \operatorname{tr}\{\mathrm E(\widehat\Sigma_A)\mathrm E(\widehat\Sigma_{A^c})\} = \operatorname{tr}(\Sigma^2) = \varsigma^2.
\]
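The three steps above translate directly into code; a minimal sketch for a single random split is given below (our own function name and a fixed seed; Remark 11 below discusses averaging over repeated splits).

```python
import numpy as np

def sigma2_split(X, Y, seed=0):
    """Data-splitting estimator tr(Sigma_hat_A Sigma_hat_Ac) of varsigma^2 = tr(Sigma^2);
    X is n x p, Y is n x d, with n assumed even and n/2 > p."""
    rng = np.random.default_rng(seed)
    n, p = X.shape
    perm = rng.permutation(n)
    A, Ac = perm[: n // 2], perm[n // 2:]

    def sigma_hat(idx):
        Xs, Ys = X[idx], Y[idx]
        B_hat, *_ = np.linalg.lstsq(Xs, Ys, rcond=None)  # least squares fit on this half
        V_hat = Ys - Xs @ B_hat                          # residual matrix
        return V_hat.T @ V_hat / (len(idx) - p)

    S_A, S_Ac = sigma_hat(A), sigma_hat(Ac)
    # tr(S_A S_Ac) = sum of elementwise products since both matrices are symmetric
    return float(np.sum(S_A * S_Ac))
```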
Theorem 4.1. Assume that $p/n < \varpi_2$ for some positive constant $\varpi_2 < 1/2$ and that the least squares estimates are well defined for both $\mathcal M_A$ and $\mathcal M_{A^c}$. Then we have
\[
\mathrm E\bigg| \frac{\widehat\varsigma_A}{\varsigma} - 1 \bigg|^2 \lesssim \frac{M_4}{n^2} + \frac{p\operatorname{tr}(\Sigma^4)}{n^2\varsigma^4} + \frac{\|\mathrm E_0(V_1^\top \Sigma V_1)\|_2^2}{n\varsigma^4}.
\]
Remark 10. The proof of Theorem 4.1 is given in Section 7.2, where a more general upper bound on $\mathrm E|\widehat\varsigma_A/\varsigma - 1|^\tau$ is established for $1 < \tau \le 2$. Theorem 4.1 reveals that $\widehat\varsigma_A$ is ratio-consistent under mild moment conditions. Suppose now that $\{V_i\}_{i \in \mathbb N}$ follow the linear process model (2.6) with $\max_{1 \le \ell \le L} \mathrm E|\xi_{i\ell}|^4 \le C < \infty$. Then $M_4$ is bounded and $\|\mathrm E_0(V_1^\top \Sigma V_1)\|_2^2 \lesssim \operatorname{tr}(\Sigma^4)$. Consequently,
\[
\mathrm E\bigg| \frac{\widehat\varsigma_A}{\varsigma} - 1 \bigg|^2 \lesssim n^{-2} + \frac{\operatorname{tr}(\Sigma^4)}{n\varsigma^4}.
\]
In this case, $\widehat\varsigma_A$ is ratio-consistent for arbitrary dimension $d \ge 1$ as long as $n \to \infty$.

Remark 11. There are in total $\binom{n}{n/2}$ different ways of splitting $\{1, \ldots, n\}$ into two halves. To reduce the influence of the randomness of an arbitrary splitting, we can repeat the procedure independently multiple times and then average the resulting estimators. We refer to Fan et al. (2012) for more discussion of data-splitting and repeated data-splitting.
Remark 12. Let $\widehat\Sigma = (n - p)^{-1}\widehat V^\top \widehat V$. Observe that $\mathrm E(\widehat V_i^\top \widehat V_j) = \bar P_{ij,1}\operatorname{tr}(\Sigma)$. We can estimate $\varsigma^2$ via
\[
\widehat\varsigma^2_S = \frac{\sum_{i,j=1}^n |\widehat V_i^\top \widehat V_j - \bar P_{ij,1}\operatorname{tr}(\widehat\Sigma)|^2}{(n - p + 2)(n - p - 1)}
= \frac{(n-p)^2}{(n - p + 2)(n - p - 1)} \bigg\{ |\widehat\Sigma|_F^2 - \frac{\{\operatorname{tr}(\widehat\Sigma)\}^2}{n - p} \bigg\},
\]
which is the same as the estimator proposed in Srivastava and Fujikoshi (2006), where $\{V_i\}_{i=1}^n$ are assumed to be Gaussian random vectors; see also Bai and Saranadasa (1996). However, for non-Gaussian $\{V_i\}_{i=1}^n$ such that $\|\mathrm E_0(V_1^\top V_1)\|_2^2 \neq 2\varsigma^2$, this estimator is generally biased, as
\[
\mathrm E(\widehat\varsigma^2_S) - \varsigma^2 = \frac{\sum_{i=1}^n \bar P_{ii,1}^2}{(n-p)(n-p+2)} \big\{ \|\mathrm E_0(V_1^\top V_1)\|_2^2 - 2\varsigma^2 \big\}.
\]
In particular, the bias of $\widehat\varsigma^2_S$ can diverge when $\|\mathrm E_0(V_1^\top V_1)\|_2^2$ is much larger than $\varsigma^2$. Below we provide an example that typifies the diverging bias.
[Figure 1: Empirical averages of the values of $|\widehat\varsigma/\varsigma - 1|$ plotted against $d \times 100$; the two panels compare the Split, SF and Oracle estimators.]

Example 4.1. Let $\{\xi_i\}_{i \in \mathbb N}$ and $\{\xi'_i\}_{i \in \mathbb N}$ be two sequences of independent Gaussian random vectors $N(0, \Sigma)$, where $\Sigma = (\Sigma_{ij})_{d \times d}$ has entries $\Sigma_{ij} = \vartheta^{|i-j|}$ for some $\vartheta \in (0, 1)$. Following Wang et al. (2015), we draw i.i.d. innovations $\{V_i\}_{i=1}^n$ from a scale mixture of two independent multivariate Gaussian distributions as follows,
\[
V_i = \nu_i \times \xi_i + 3(1 - \nu_i) \times \xi'_i \qquad (i = 1, \ldots, n),
\]
where $\{\nu_i\}_{i \in \mathbb N}$ are independent Bernoulli random variables with $\mathrm P(\nu_i = 1) = 0.9$. A simulation study is given in Section 5 with $\vartheta$ set to $0.3$ and $0.7$. We report in Figure 1 the average values of $|\widehat\varsigma/\varsigma - 1|$ for $\widehat\varsigma_A$, $\widehat\varsigma_o$ and $\widehat\varsigma_S$, based on 1000 replications with the numerical setup $(n, p, m) = (100, 20, 10)$ and $d = 200, 400, 800, 1000, 1200$. For both values of $\vartheta$, $|\widehat\varsigma_A/\varsigma - 1|$ and $|\widehat\varsigma_o/\varsigma - 1|$ are very close to $0$, while $|\widehat\varsigma_S/\varsigma - 1|$ is quite large. More precisely, we can derive that $\|\mathrm E_0(V_1^\top V_1)\|_2^2 \approx (18 + d)\varsigma^2$.
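For reproducibility of this kind of experiment, the innovations of Example 4.1 can be generated as follows (a sketch; the helper name and seed are ours).

```python
import numpy as np

def example_4_1_innovations(n, d, vartheta=0.3, seed=3):
    """Draw V_i = nu_i * xi_i + 3 * (1 - nu_i) * xi'_i with xi, xi' ~ N(0, Sigma),
    Sigma_jk = vartheta^{|j-k|}, and nu_i ~ Bernoulli(0.9), as in Example 4.1."""
    rng = np.random.default_rng(seed)
    Sigma = vartheta ** np.abs(np.subtract.outer(np.arange(d), np.arange(d)))
    L = np.linalg.cholesky(Sigma)
    xi = rng.standard_normal((n, d)) @ L.T
    xi_prime = rng.standard_normal((n, d)) @ L.T
    nu = rng.binomial(1, 0.9, size=n)[:, None]
    return nu * xi + 3 * (1 - nu) * xi_prime
```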
Substituting the ratio-consistent estimator ς̂²_A into var(U_n) = 2|P_θ|²_F ς² yields U_n/(ς̂_A |P_θ|_F) ⇒ N(0, 2) under (2.16). Then, for α ∈ (0, 1), an asymptotic α-level test is given by
\[
\Phi_Z = I\!\left\{ \frac{U_n}{\widehat\varsigma_A |P_\theta|_F \sqrt{2}} > z_{1-\alpha} \right\}, \qquad (4.2)
\]
where z_{1−α} is the (1 − α)th quantile of the standard normal distribution.
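The decision rule (4.2) is a one-line comparison once U_n, ς̂_A and |P_θ|_F are available; a small sketch (the argument names are ours, and scipy is used only for the normal quantile):

```python
import numpy as np
from scipy.stats import norm

def clt_test(U_n: float, sigma_A: float, P_theta_F: float, alpha: float = 0.05) -> bool:
    """Asymptotic alpha-level test (4.2): reject H0 when the studentized
    statistic exceeds the (1 - alpha) standard normal quantile."""
    z = norm.ppf(1.0 - alpha)                               # z_{1-alpha}
    return U_n / (sigma_A * P_theta_F * np.sqrt(2.0)) > z
```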
5 A simulation study

In this section, we conduct a Monte Carlo simulation study to assess the finite-sample performance of the proposed tests. In the model (1.1), we write X_i = (1, x_i^⊤)^⊤ ∈ R^p to include an intercept. Here x_1, ..., x_n ∈ R^{p−1} are i.i.d. N(0, I_{p−1}) random vectors. Let m < p. For all k ∈ {1, ..., p − m}, all entries of the coefficient vector B_k are i.i.d. uniform random variables on the interval (1, 2). Once the B_k's are generated, we keep their values fixed throughout the simulation. Our goal is to identify the zero B_k's by testing
\[
H_0 : B_{p-m+1} = B_{p-m+2} = \cdots = B_p = 0.
\]
In our simulation, we set (p, m) = (20, 10), n = 100, 200 and d = 400, 800, 1200. We consider two different designs for the innovations (V_i): the one introduced in Example 4.1 and the one in Example 5.1 below. In both examples, the parameter ϑ is set to 0.3 and 0.7.
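A sketch of this design (illustrative; it assumes the multivariate regression form Y = XB + V for model (1.1), with B the p × d matrix whose k-th row is B_k^⊤, which is consistent with the residual matrix V̂ = P̄_1 Y used below):

```python
import numpy as np

def simulate_design(n: int, p: int, m: int, d: int, rng=None):
    """Design of Section 5 (sketch): intercept plus i.i.d. N(0, I_{p-1}) covariates;
    the first p - m coefficient vectors have i.i.d. Uniform(1, 2) entries and the
    last m are zero, so that H0 holds. B should be drawn once and kept fixed
    across replications, as described in the text."""
    rng = np.random.default_rng() if rng is None else rng
    X = np.column_stack([np.ones(n), rng.standard_normal((n, p - 1))])  # X_i = (1, x_i^T)^T
    B = np.zeros((p, d))
    B[: p - m, :] = rng.uniform(1.0, 2.0, size=(p - m, d))              # nonzero B_k's
    return X, B
```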
Example 5.1. Let {ξ_{ij}}_{i,j∈N} be i.i.d. random variables with E(ξ_{11}) = 0 and var(ξ_{11}) = 1. In particular, we consider two cases for (ξ_{ij}): they are drawn from the standardized t_5 distribution and the standardized χ²_5 distribution, respectively. Writing ξ_i = (ξ_{i1}, ..., ξ_{id})^⊤, for some ϑ ∈ (0, 1) we generate
\[
V_i = \sqrt{1-\vartheta}\,\xi_i + \sqrt{\vartheta}\,(\xi_{i0}, \xi_{i0}, \ldots, \xi_{i0})^\top, \quad i \in \mathbb{N}.
\]
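A minimal sketch of this second design (illustrative; the standardizations divide by the standard deviations √(5/3) for t_5 and √10 for χ²_5 so that var(ξ_11) = 1, and the shared coordinate ξ_{i0} is broadcast across all d entries):

```python
import numpy as np

def draw_innovations_ex51(n: int, d: int, theta: float, dist: str = "t5", rng=None) -> np.ndarray:
    """Draw V_1, ..., V_n as in Example 5.1: a common factor xi_{i0} shared by all
    coordinates plus idiosyncratic noise, with standardized t_5 or chi^2_5 entries."""
    rng = np.random.default_rng() if rng is None else rng

    def standardized(size):
        if dist == "t5":
            return rng.standard_t(5, size=size) / np.sqrt(5.0 / 3.0)   # var(t_5) = 5/3
        return (rng.chisquare(5, size=size) - 5.0) / np.sqrt(10.0)      # var(chi^2_5) = 10

    xi = standardized((n, d))        # xi_{i1}, ..., xi_{id}
    xi0 = standardized((n, 1))       # shared factor xi_{i0}
    return np.sqrt(1.0 - theta) * xi + np.sqrt(theta) * xi0
```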
We shall apply a Gaussian multiplier bootstrap approach to implement our proposed test. The procedure is as follows (a condensed code sketch is given below).

1. Compute the residual matrix V̂ = (V̂_1, ..., V̂_n)^⊤ = P̄_1 Y. Generate i.i.d. N(0, 1) random variables {ω_{ij}}_{i,j∈N} and compute the bootstrap residuals V^⋆ = (V^⋆_1, ..., V^⋆_n)^⊤, where
\[
V^\star_i = \frac{1}{\sqrt{n-p}} \sum_{j=1}^n \omega_{ij}\,\widehat V_j \quad (i = 1, \ldots, n).
\]

2. Use V^⋆ to compute ς̂^⋆_A and the bootstrap test statistic U^⋆_n = tr(V^{⋆⊤} P_θ V^⋆).

3. Repeat the first two steps independently B times and collect U^⋆_{nk} and ς̂^⋆_{Ak}, k = 1, ..., B.

4. Let ĉ_{1−α} be the (1 − α)th quantile of {U^⋆_{nk}/(ς̂^⋆_{Ak} |P_θ|_F √2)}_{k=1,...,B}. Then our test is
\[
\Phi_B = I\!\left\{ \frac{U_n}{\widehat\varsigma_A |P_\theta|_F \sqrt{2}} > \widehat c_{1-\alpha} \right\}, \qquad (5.1)
\]
and we shall reject the null hypothesis whenever Φ_B = 1.

Similar to G_n, U^⋆_n is a quadratic functional of i.i.d. Gaussian random vectors conditional on {X, Y} and is distributed as a linear combination of independent chi-squared random variables. To justify the validity of the proposed Gaussian multiplier bootstrap approach, it suffices to bound the distance between the distribution functions of these two quadratic functionals, which can be established by verifying the normalized consistency (Xu et al., 2014) of the corresponding covariance matrix. However, this can be highly non-trivial in the high-dimensional setting and is beyond the scope of the current paper. Hence we leave it for future work. In our simulation, we set the bootstrap size B = 1000.
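A condensed sketch of steps 1-4 (illustrative only; it treats the estimator ς̂_A, which is defined earlier in the paper, as a user-supplied callable `sigma_A_hat`, and assumes the residual matrix `V_hat` and the matrix `P_theta` have already been formed):

```python
import numpy as np

def gmb_critical_value(V_hat, P_theta, sigma_A_hat, p, B=1000, alpha=0.05, rng=None):
    """Gaussian multiplier bootstrap critical value hat c_{1-alpha} (steps 1-4).

    V_hat       : (n, d) residual matrix with rows hat V_i^T.
    P_theta     : (n, n) matrix appearing in U*_n = tr(V*^T P_theta V*).
    sigma_A_hat : callable mapping a residual matrix to the estimate of varsigma.
    p           : number of columns of the design matrix X.
    """
    rng = np.random.default_rng() if rng is None else rng
    n = V_hat.shape[0]
    P_F = np.linalg.norm(P_theta, "fro")                 # |P_theta|_F
    stats = np.empty(B)
    for k in range(B):
        omega = rng.standard_normal((n, n))              # i.i.d. N(0, 1) multipliers
        V_star = omega @ V_hat / np.sqrt(n - p)          # V*_i = sum_j omega_ij hat V_j / sqrt(n - p)
        U_star = np.sum(V_star * (P_theta @ V_star))     # = tr(V*^T P_theta V*)
        stats[k] = U_star / (sigma_A_hat(V_star) * P_F * np.sqrt(2.0))
    return np.quantile(stats, 1.0 - alpha)
```

The test Φ_B in (5.1) then rejects H_0 whenever U_n/(ς̂_A |P_θ|_F √2) exceeds the returned critical value.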
As a comparison, we also perform the test suggested in (4.2), based on the central limit theorem (CLT), and the test proposed in Srivastava and Kubokawa (2013), which we denote by SK. For each test, we report the empirical size based on 2000 replications, as displayed in Table 1 and Table 2. The results suggest that our proposed test using the bootstrap procedure (GMB) provides the best size accuracy in general, as its empirical sizes are close to the nominal level α. For Example 4.1, both the CLT test and our Gaussian multiplier bootstrap method perform better than the SK test, since the latter is too conservative when d is large. As expected from our theoretical results, the normal approximation can work reasonably well in this design. For Example 5.1, the Gaussian multiplier bootstrap method outperforms the other two procedures in size accuracy in all cases. The SK test suffers from size distortion. The CLT test inflates the size more than the GMB method, which can be explained by the fact that condition (3.1) does not hold and the CLT for U_n fails.
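The empirical sizes below are simply rejection frequencies over independent replications generated under H_0; a generic sketch of that loop (the callable `run_one_test` stands for one full simulate-and-test run, for instance the CLT test in (4.2) or the bootstrap test in (5.1)):

```python
import numpy as np

def empirical_size(run_one_test, n_rep: int = 2000, rng=None) -> float:
    """Monte Carlo estimate of the size: rejection frequency under H0.

    run_one_test : callable taking a Generator and returning True iff the test
                   rejects on a freshly simulated null data set.
    """
    rng = np.random.default_rng() if rng is None else rng
    rejections = sum(run_one_test(rng) for _ in range(n_rep))
    return rejections / n_rep
```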
Table 1: Empirical sizes for Example 4.1 with α = 0.05

                      ϑ = 0.3                    ϑ = 0.7
   n      d      CLT     GMB     SK         CLT     GMB     SK
  100    400    0.057   0.047   0.041      0.059   0.051   0.036
         800    0.049   0.045   0.033      0.063   0.056   0.026
        1200    0.062   0.055   0.021      0.048   0.045   0.028
  200    400    0.056   0.052   0.042      0.052   0.047   0.037
         800    0.052   0.049   0.037      0.053   0.050   0.033
        1200    0.045   0.044   0.029      0.050   0.046   0.035
Table 2: Empirical sizes for Example 5.1 with α = 0.05

                              t_5                        χ²_5
   ϑ     n      d      CLT     GMB     SK         CLT     GMB     SK
  0.3   100    400    0.068   0.058   0.023      0.083   0.065   0.036
               800    0.082   0.066   0.023      0.074   0.058   0.016
              1200    0.082   0.068   0.015      0.067   0.053   0.011
        200    400    0.073   0.059   0.022      0.067   0.054   0.018
               800    0.071   0.057   0.012      0.074   0.058   0.014
              1200    0.076   0.059   0.011      0.077   0.058   0.011
  0.7   100    400    0.074   0.055   0.002      0.082   0.062   0.002
               800    0.084   0.066   0.001      0.085   0.071   0.000
              1200    0.073   0.057   0.000      0.076   0.062   0.001
        200    400    0.083   0.067   0.001      0.080   0.064   0.000
               800    0.068   0.050   0.000      0.075   0.062   0.000
              1200    0.070   0.051   0.001      0.074   0.056   0.000
More specifically, for both ϑ = 0.3 and ϑ = 0.7, elementary calculations show that λ_1(Σ)/ς → 1. As a result, (2.16) is violated as m = 10; see also the comment at the end of Section 2.2 for a discussion of the non-normality of U_n. To gain more insight, we display in Figure 2 the density plots of U_n/√var(U_n) for n = 100, together with the density of N(0, 1). As we can see from the plots, the distribution of U_n/√var(U_n) is skewed to the right in all cases, which explains the inflated sizes of the CLT test.
More simulation studies on power comparison of these three tests are conducted in Section 7.1.

Figure 2: Density plots of U_n/√var(U_n) and N(0, 1) (curves for d = 400, 800, 1200 and the standard normal density; horizontal axis x, vertical axis density).
6 Data analysis

We apply the proposed method to two data sets. Our first dataset came from a study of the impact of the gut microbiome on host serum metabolome and insulin sensitivity in non-diabetic Danish adults (Pedersen et al., 2016). It consists of measurements of 1201 metabolites (325 serum polar metabolites and 876 serum molecular lipids) on 289 serum samples using mass spectrometry. The cleaned dataset was downloaded from https://bitbucket.org/hellekp/clinical-micro-meta-integration (Pedersen et al., 2018). We use this dataset to identify insulin resistance (IR)-associated metabolites. IR was estimated by the homeostatic model assessment (Pedersen et al., 2016). Body mass index (BMI) is a confounder for this dataset since it is highly correlated with IR (Spearman's ρ = 0.67) and is known to affect the serum metabolome. Two samples without IR measurement were excluded. For metabolites with zero measurements, zeros were replaced by half of the minimal nonzero value. Log transformation was performed to make the data more symmetrically distributed before analysis.
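A small sketch of this preprocessing step (illustrative; `metab` is assumed to be a samples × metabolites array of raw intensities, and the zero replacement is done per metabolite, which is one of several reasonable readings of the text):

```python
import numpy as np

def preprocess_metabolites(metab: np.ndarray) -> np.ndarray:
    """Replace zeros by half of the minimal nonzero value, then log-transform."""
    out = metab.astype(float).copy()
    for j in range(out.shape[1]):
        col = out[:, j]
        nonzero = col[col > 0]
        if nonzero.size and (col == 0).any():
            col[col == 0] = nonzero.min() / 2.0      # half of the minimal nonzero value
        # columns with no nonzero entries are left untouched in this sketch
    return np.log(out)
```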
The p-values associated with the three methods (CLT, GMB, and SK) are all very close to zero, indicating a strong dependence between metabolites and IR. We further perform a linear regression analysis on each metabolite using IR and BMI as the covariates. Figure 3 (left panel) presents the histogram of the p-values for testing the significance of the coefficients associated with IR. We see a high peak close to zero, which provides strong evidence of the association between metabolites and IR. We further apply the Holm–Bonferroni procedure to the p-values to control the family-wise error rate at the 5% level, resulting in 164 discoveries.

Our second dataset is from the study of the smoking effect on the human upper respiratory tract (Charlson et al., 2010). The original data set contains samples from both throat and nose microbiomes and both body sides. Here we focus on the throat microbiome of the left body side, which includes 60 subjects consisting of 32 nonsmokers and 28 smokers. More precisely, the data set is presented as a 60 × 856 abundance table recording the frequencies of detected operational taxonomic units (OTUs) in the samples using the 16S metagenomics approach, together with a metadata table capturing the sample-level information, including smoking status and sex. We transform the OTU abundances using the centered log-ratio (CLR) transformation after adding a pseudo-count of 0.5 to the zero counts. Our goal is to test the association of the throat microbiome with smoking status while adjusting for sex.
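A minimal sketch of this CLR step (illustrative; `counts` is assumed to be the 60 × 856 OTU count matrix, with samples in rows):

```python
import numpy as np

def clr_transform(counts: np.ndarray, pseudo: float = 0.5) -> np.ndarray:
    """Centered log-ratio transform after adding a pseudo-count to zero counts."""
    x = counts.astype(float).copy()
    x[x == 0] = pseudo                                   # pseudo-count of 0.5 for zeros
    logx = np.log(x)
    return logx - logx.mean(axis=1, keepdims=True)       # subtract per-sample mean of logs
```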
The proposed method, using either the normal approximation or the bootstrap approximation, detects a strong association between the throat microbiome and smoking status. In contrast, the SK method fails to discover the association. We further perform an OTU-wise linear regression analysis using each OTU (after the CLR transformation) as the response and smoking status and sex as covariates. Figure 3 (right panel) presents the histogram of the p-values for testing the association between each OTU and smoking status after adjusting for sex in each linear regression. Interestingly, adjusting for multiplicity using either the Holm–Bonferroni procedure or the BH procedure at the 5% level yields zero discoveries (Zhou et al., 2021). These results suggest that the association between any individual OTU and smoking status is weak. However, after aggregating the weak effects from all the OTUs, the combined effect is strong enough to be detected by the proposed method.
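For reference, the two multiplicity adjustments mentioned above can be written down in a few lines (a minimal sketch operating on the vector of feature-wise p-values; it returns only the number of discoveries):

```python
import numpy as np

def holm_rejections(pvals, alpha: float = 0.05) -> int:
    """Number of rejections of the Holm-Bonferroni step-down procedure at level alpha."""
    p = np.sort(np.asarray(pvals, dtype=float))
    M = p.size
    for i, pi in enumerate(p):          # compare p_(i+1) with alpha / (M - i)
        if pi > alpha / (M - i):
            return i                    # stop at the first failure
    return M

def bh_rejections(pvals, alpha: float = 0.05) -> int:
    """Number of rejections of the Benjamini-Hochberg (BH) procedure at level alpha."""
    p = np.sort(np.asarray(pvals, dtype=float))
    M = p.size
    thresholds = alpha * np.arange(1, M + 1) / M
    passed = np.nonzero(p <= thresholds)[0]
    return 0 if passed.size == 0 else int(passed[-1]) + 1
```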
Figure 3: Histograms of the p-values for testing the association between individual omics feature and the variable of interest after adjusting for the confounder (left panel: Metabolomics; right panel: Microbiome; horizontal axis pvalue, vertical axis count/sum(count)).

Table 3: P-values of the three methods applied to the metabolomics and microbiome data sets.

                 Metabolomics                    Microbiome
            CLT     GMB     SK          CLT          GMB     SK
  p-value   0.00    0.00    0.00        9.7 × 10⁻⁶   0.002   0.13

References

Michael G. Akritas and Steven F. Arnold. Fully nonparametric hypotheses for factorial designs I: Multivariate repeated measures designs. J. Amer. Statist. Assoc., 89(425):336–343, 1994.

T. W. Anderson. An Introduction to Multivariate Statistical Analysis. Wiley Series in Probability and Statistics, 2003.

Zhidong Bai and Hewa Saranadasa. Effect of high dimension: by an example of a two sample problem. Statist. Sinica, 6(2):311–329, 1996.
Edgar Brunner and Madan L. Puri. Nonparametric methods in factorial designs. Statist. Papers, 42(1):1–52, 2001.

T. Tony Cai and Zongming Ma. Optimal hypothesis testing for high dimensional covariance matrices. Bernoulli, 19(5B):2359–2388, 2013.

T. Tony Cai and Yin Xia. High-dimensional sparse MANOVA. J. Multivariate Anal., 131:174–196, 2014.
Emily S. Charlson, Jun Chen, Rebecca Custers-Allen, Kyle Bittinger, Hongzhe Li, Rohini Sinha, Jennifer Hwang, Frederic D. Bushman, and Ronald G. Collman. Disordered microbial communities in the upper respiratory tract of cigarette smokers. PLoS ONE, 5(12):e15216, 2010.

Song Xi Chen and Ying-Li Qin. A two-sample test for high-dimensional data with applications to gene-set testing. Ann. Statist., 38(2):808–835, 2010.

Xiaohui Chen. Gaussian and bootstrap approximations for high-dimensional U-statistics and their applications. Ann. Statist., 46(2):642–678, 2018.

Jianqing Fan, Shaojun Guo, and Ning Hao. Variance estimation using refitted cross-validation in ultrahigh dimensional regression. J. R. Stat. Soc. Ser. B. Stat. Methodol., 74(1):37–65, 2012.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Soc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ser.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Stat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Methodol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 74(1):37–65, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 13 F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' G¨otze and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Tikhomirov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Asymptotic distribution of quadratic forms and applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Theoret.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Probab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 15(2):423–475, 2002.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 3 F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' G¨otze and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Tikhomirov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Asymptotic distribution of quadratic forms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Probab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 27(2):1072–1098, 1999.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 3 19 Arthur Gretton, Kenji Fukumizu, and Bharath K Sriperumbudur.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Discussion of: Brownian distance covari- ance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Stat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 3(4):1285–1294, 2009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 12 Arthur Gretton, Karsten M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Borgwardt, Malte J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Rasch, Bernhard Sch¨olkopf, and Alexander Smola.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' A kernel two-sample test.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Mach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Learn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Res.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 13:723–773, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 12 Yinqiu He, Bo Meng, Zhenghao Zeng, and Gongjun Xu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' On the phase transition of wilks’ phenomenon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Biometrika, 108(3):741–748, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 5 Jiang Hu, Zhidong Bai, Chen Wang, and Wei Wang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' On testing the equality of high dimensional mean vectors with unequal covariance matrices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Inst.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 69(2):365–387, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 2 Peter J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Huber.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Robust regression: asymptotics, conjectures and Monte Carlo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 1:799–821, 1973.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 8 William H Kruskal and W Allen Wallis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Use of ranks in one-criterion variance analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Amer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Assoc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 47(260):583–621, 1952.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 11 Soumendra Nath Lahiri.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Bootstrapping M-estimators of a multiple linear regression parameter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 20(3):1548–1570, 1992.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 8 Huiqin Li, Jiang Hu, Zhidong Bai, Yanqing Yin, and Kexin Zou.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Test on the linear combinations of mean vectors in high-dimensional data.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' TEST, 26(1):188–208, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 2 Enno Mammen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Asymptotics with increasing dimension for robust regression with applications to the boot- strap.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 17(1):382–400, 1989.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 8 William Navidi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Edgeworth expansions for bootstrapping regression models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 17(4):1472–1478, 1989.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 8 Helle Krogh Pedersen, Valborg Gudmundsdottir, Henrik Bjørn Nielsen, Tuulia Hyotylainen, Trine Nielsen, Benjamin AH Jensen, Kristoffer Forslund, Falk Hildebrand, Edi Prifti, Gwen Falony, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Human gut microbes impact host serum metabolome and insulin sensitivity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Nature, 535(7612):376–381, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 17 Helle Krogh Pedersen, Sofia K Forslund, Valborg Gudmundsdottir, Anders Østergaard Petersen, Falk Hilde- brand, Tuulia Hy¨otyl¨ainen, Trine Nielsen, Torben Hansen, Peer Bork, S Dusko Ehrlich, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' A com- putational framework to integrate high-throughput ‘-omics’ datasets for the identification of potential mechanistic links.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Nature protocols, 13(12):2781–2800, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 17 Stephen Portnoy.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Asymptotic behavior of M estimators of p regression parameters when p2/n is large.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Normal approximation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 13(4):1403–1417, 1985.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 8 Maria L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Rizzo and G´abor J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Sz´ekely.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' DISCO analysis: a nonparametric extension of analysis of variance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Stat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 4(2):1034–1055, 2010.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 11, 12 James R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Schott.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Some high-dimensional tests for a one-way MANOVA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Multivariate Anal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 98(9): 1825–1839, 2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 2 20 Jun Shao.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' On resampling methods for variance and bias estimation in linear models.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 16(3): 986–1008, 1988.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 8 Jun Shao and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content='-F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Wu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Heteroscedasticity-robustness of jackknife variance estimators in linear models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 15(4):1563–1579, 1987.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 8 Muni S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Srivastava and Yasunori Fujikoshi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Multivariate analysis of variance with fewer observations than the dimension.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Multivariate Anal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 97(9):1927–1940, 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 13 Muni S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Srivastava and Tatsuya Kubokawa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Tests for multivariate analysis of variance in high dimension under non-normality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Multivariate Anal.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 115:204–216, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 15 Muni S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Srivastava, Shota Katayama, and Yutaka Kano.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' A two sample test in high dimensional data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Multivariate Anal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 114:349–358, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 2 G´abor J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Sz´ekely, Maria L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Rizzo, and Nail K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Bakirov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Measuring and testing dependence by correlation of distances.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 35(6):2769–2794, 2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 12 Olivier Thas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Comparing distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Springer Series in Statistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Springer, New York, 2010.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 11 Lan Wang, Bo Peng, and Runze Li.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' A high-dimensional nonparametric multivariate test for mean vector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Amer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Assoc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 110(512):1658–1669, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 14 Jennifer Wessel and Nicholas J Schork.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Generalized genomic distance–based regression methodology for multilocus association analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' The American Journal of Human Genetics, 79(5):792–806, 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 2 C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content='-F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Wu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Jackknife, bootstrap and other resampling methods in regression analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 14 (4):1261–1350, 1986.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' With discussion and a rejoinder by the author.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 8 Mengyu Xu, Danna Zhang, and Wei Biao Wu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' L2 asymptotics for high-dimensional data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' arXiv preprint arXiv:1405.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content='7244, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 15 Mengyu Xu, Danna Zhang, and Wei Biao Wu.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Pearson’s chi-squared statistics: approximation theory and beyond.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Biometrika, 106(3):716–723, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 5 Shun Yao, Xianyang Zhang, and Xiaofeng Shao.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Testing mutual independence in high dimension via distance covariance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Stat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Soc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ser.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Stat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Methodol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 80(3):455–480, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 9 Matthew A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Zapala and Nicholas J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Schork.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Multivariate regression analysis of distance matrices for testing associations between gene expression patterns and related variables.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Proceedings of the National Academy of Sciences, 103(51):19430–19435, 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 2 Matthew A Zapala and Nicholas J Schork.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statistical properties of multivariate distance matrix regression for high-dimensional data analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Frontiers in genetics, 3:190, 2012.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 2 Jin-Ting Zhang, Jia Guo, and Bu Zhou.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Linear hypothesis testing in high-dimensional one-way MANOVA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Multivariate Anal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 155:200–216, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 2 21 Xianyang Zhang, Shun Yao, and Xiaofeng Shao.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Conditional mean and quantile dependence testing in high dimension.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=', 46(1):219–246, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 9 Bu Zhou, Jia Guo, and Jin-Ting Zhang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' High-dimensional general linear hypothesis testing under het- eroscedasticity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Statist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Plann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Inference, 188:36–54, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' 2 Huijuan Zhou, Kejun He, Jun Chen, and Xianyang Zhang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' Linda: Linear models for differential abundance analysis of microbiome compositional data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content=' arXiv preprint arXiv:2104.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/69E2T4oBgHgl3EQf7ggl/content/2301.04209v1.pdf'} +page_content='00242, 2021.' 
diff --git a/6dE4T4oBgHgl3EQfBwu-/content/tmp_files/2301.04855v1.pdf.txt b/6dE4T4oBgHgl3EQfBwu-/content/tmp_files/2301.04855v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2b2b1fe9397e5a3335614c41a54d8f953b6d3bb3
--- /dev/null
+++ b/6dE4T4oBgHgl3EQfBwu-/content/tmp_files/2301.04855v1.pdf.txt
@@ -0,0 +1,1475 @@
Estimation of thermal load on the nozzle base plate from small plumes at high temperature
Kamal Khemani1, Pradeep Kumar1*, Ganesh Natarajan2
1 Numerical Experiment Laboratory (Radiation & Fluid Flow Physics), Indian Institute of Technology Mandi, Himachal Pradesh, 175075, India
2 Discipline of Mechanical Engineering, Indian Institute of Technology Palakkad, Palakkad, Kerala, 678557, India

Abstract
A numerical study is performed to estimate the thermal load on the nozzle base plate, which lies upstream of the flow, from three hot plumes of pure CO2, pure H2O, and a 50-50 (%) mixture of CO2 and H2O expanding through a convergent-divergent (CD) nozzle into a quiescent medium at 1.1 bar pressure and 298 K temperature. The base plate of the nozzle heats up due to the thermal radiation emitted by the hot gases in the plume. The spectral radiative properties of the major participating gases, CO2 and H2O, are calculated from the HITEMP-2010 database. A small CD nozzle designed for perfect expansion of air by a 1D calculation, with a throat diameter of 1.98 mm and an area ratio of 1.5942, is adopted as the nozzle geometry for the present study [1]. All three plumes are under-expanded for this CD nozzle and hence expand rapidly at supersonic speed as they exit the nozzle, forming a series of expansion and compression waves. Owing to diffusion, the hot plumes emanating from the nozzle produce very high temperatures in a small region adjacent to the base plate. Barring this region, the maximum radiative fluxes on the base plate for the three plumes, i.e., the CO2 plume, the mixture plume and the H2O plume, are 4000 W/m2, 2300 W/m2 and 1300 W/m2, respectively, and the maximum temperatures developed due to these fluxes are 323 K, 312 K and 308 K, respectively.
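As a quick illustration of the one-dimensional design calculation referred to above, the sketch below (not part of the paper; plain C++ with an assumed gamma = 1.4 for air) solves the isentropic area-Mach relation for the stated area ratio of 1.5942 and reports the implied stagnation-to-exit pressure ratio, which comes out close to the 7.11 bar to roughly 1 atm expansion used here; hotter gases with a different gamma leave the same nozzle under-expanded.

    #include <cmath>
    #include <cstdio>

    // Isentropic area ratio A/A* as a function of Mach number (1D relation).
    double areaRatio(double M, double g) {
        double t = (2.0 / (g + 1.0)) * (1.0 + 0.5 * (g - 1.0) * M * M);
        return std::pow(t, (g + 1.0) / (2.0 * (g - 1.0))) / M;
    }

    int main() {
        const double g = 1.4, AR = 1.5942;   // air, nozzle area ratio from the paper
        double lo = 1.0, hi = 5.0;           // supersonic branch
        for (int i = 0; i < 100; ++i) {      // bisection on areaRatio(M) = AR
            double mid = 0.5 * (lo + hi);
            (areaRatio(mid, g) < AR ? lo : hi) = mid;
        }
        double M = 0.5 * (lo + hi);
        double p0_pe = std::pow(1.0 + 0.5 * (g - 1.0) * M * M, g / (g - 1.0));
        std::printf("Design exit Mach ~ %.3f, p0/pe ~ %.2f\n", M, p0_pe);
        return 0;
    }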
Keywords: Compressible flow, gas radiation, thermal load, underexpanded
Email: pradeepkumar@iitmandi.ac.in (Pradeep Kumar)
arXiv:2301.04855v1 [physics.comp-ph] 12 Jan 2023

NOMENCLATURE
English Symbols
c1, c2   First and second radiation constants
cp       Specific heat at constant pressure
e        Internal energy
h        Enthalpy
k        Thermal conductivity, turbulent kinetic energy
n̂        Unit normal vector
p        Pressure
q        Heat flux
s        Direction vector
t        Time
u        Velocity
x        Cartesian coordinate
Ar       Area ratio
Iη       Spectral intensity
Ibη      Planck function
R        Universal gas constant
Y        Species mass fraction
Greek Symbols
+Everson and Nelson [8] developed reverse Monte Carlo method to predict base plate heating from plume +due to radiation and found that, reverse Monte Carlo was computationally more efficient than forward +Monte Carlo method. This was owing to the fact that only the rays that strikes the target point was only +considered. For calculations they used band models for gas spectrum and Henyey-Greenstein function for +particle scattering. They performed reverse Monte Carlo calculations for four different cases which included +pure scattering plume, gas only emission for main engine plume, solid rocket motor plume and a plume with +non-uniform temperature which absorbs, emits and scatters, and finally found that majority of emission +is due to alumina particles coming from the centre. While, H2O and Al2O2 emitted radiation from the +4 + +center of the plume and moreover major contribution of emission came from Al2O3 particles. Kumar and +Ramamurthy [9] estimated radiative heat load on the rocket base plate using forward Monte-Carlo technique +for gray conical plume with axial and radial temperature variations. They found that the radiative heat +flux changed drastically with the change in radial temperature profile also the amount of radiative heat flux +decreased with the increase in altitude as plume cools down faster. Similar arguments were given by Gu and +Baek [10] as they examined radiative heat flux from WSGGM method for a solid rocket motor from which +the thermal load was estimated by long plumes of 5 and 10 km. +Accurate modelling of heat transfer due to radiation is very necessary for safe and efficient designing of +rocket. Estimation of radiative properties of gases is crucial and the most important part in determining +heat transfer due to radiation accurately. The radiative properties of participating gases can be calculated +using some of the most popular spectral database like High Resolution Transmission Spectroscopic Molecular +Absorption database (HITRAN) [11], Carbon-Dioxide Spectroscopic Database (CDSD) [12], High Temperature +spectroscopic absorption parameter (HITEMP) [13] etc. +The spectral absorption coefficients are highly +erratic in nature containing millions of spectral lines which attain same value multiple times. This unnecessarily +increases the computational cost required to solve the radiation transfer equation (RTE) as the line-by-line +method considers calculation for each and every line on the spectrum and is therefore, mostly used only for +benchmarking purposes [14]. +Many methods are proposed to reduce the computation resource requirements such as Full spectrum +scaled and correlated k-Distribution (FSSK/FSCK) [14], Lookup based Full spectrum K-Distribution [15], +Spectral line weight sum of gray gases [16] etc. The accuracy of the above methods is well demonstrated for +uniform composition of gases [17, 18], however, the variation in composition of gaseous and their mixture +poses another level of challenge and further modelling is required [19]. In order to use look up table based +FSK method, some interpolation techniques should be adopted for the properties for current thermodynamic +states of gases in the domain. It is evident from the above literature that only a few work is available to +calculate the heat load on the rocket base plate, that to with fixed conical plume shape and radiative +properties of gases. 
The general heat transfer applications like, combustion, rocket propulsion, gasification +contain numerous thermodynamic states, thus it is useful to generate a database for absorption coefficient +at different temperatures, pressures and mole-fractions. The present case is optically thin thus, the RTE +is solved using the Planck mean absorption coefficient at different thermodynamic states, from look-up +table. The thermal load on the nozzle base plate has been calculated from the accurate solution of flow +and temperature fields by solving complete set of governing equation. The radiative property is obtained +5 + +from the HITEMP-2010 database, stored in the form of lookup table for range of thermodynamic states +of gases and utilized during the solution of radiative transfer equation. +The thermodynamic states for +which data is available can directly be used. Further, the Planck mean absorption coefficient for unavailable +thermodynamic states can easily be calculated by using multidimensional linear interpolation technique. +The fvDOM numerical method is used for solution of RTE coupled with fluid flow using a pressure based +compressible flow application sonicRadFoam, modified from sonicFoam application of OpenFOAM [20]. +Finally it includes the work done due to viscous forces, species transfer equation and RTE with Planck mean +absorption-emission model. +The manuscript is organised as section 2 describing the problem statement, and section 3 describing the +mathematical models and governing differential equations followed by validation in section 4, results and +discussions in section 5, and finally the present work is concluded in section 6. +2. Problem description +The convergent-divergent (CD) nozzle has throat diameter and an area-ratio of 1.98 mm and 1.5942, +respectively, and the length of convergent and divergent section is 7 mm and 14 mm, respectively as shown +in Fig. 1 which also include the buffer zone for emanating the jet in the atmosphere. The base plate is +attached at the end and the fluid expands from a stagnation pressure and temperature of 7.11 bar and 2000 +K, respectively, to a quiescent medium at the atmospheric condition of 1 atm pressure and 298K. The present +CD nozzle designed for perfect expansion of air by one dimensional calculation, has been considered for the +flow of three plumes whose constituents are pure CO2, pure water vapour and 50-50(%) CO2 and H2O from +above pressure and temperature. Initially whole domain is filled with N2 gas at 1 atm pressure and 298 K +temperature. The following assumptions have been considered for in the present study. +1. Reynolds-averaged Navier-Stokes assumption is used to model turbulent flow. +2. The participating medium only absorbs or emits the thermal radiation but does not scatters. +3. Refractive index of medium and walls are equal to one. +4. Turbulence radiation interaction is neglected. +5. Constant turbulent Prandtl number assumption has been used in the present study: +6 + +Figure 1: Schematic diagram of geometry for the calculation of the thermal load on the nozzle base plate from the hot plume +2.1. Governing equations +The density and temperature fluctuations must be accounted for compressible flow of a fluid along with +velocity and pressure fluctuations. To account for these factors, the mass based averaging commonly known +as Favre averaging [21, 22], is used to describe the flow and energy transfer for compressible turbulent fluids. 
+which is defined as, +�φ = ρφ +ρ +(1) +where, ρ is the density of fluid. φ is a scalar and the averaging of density is defined below, +ρ = 1 +T +� T +0 +ρ dT +(2) +∂ρ +∂t + ∂ρ �ui +∂xi += 0 +(3) +∂ρ �ui +∂t ++ ∂ρ �ui �uj +∂xj += − ∂p +∂xi ++ ∂� +τij +∂xj +(4) +7 + +Outlet +BasePlate +ww +5 +Wall +7mm +Inlet +Axis +14mm +7 mm +28mmwhere, +� +τij = µeff +� ∂ �ui +∂xj ++ ∂ �uj +∂xi +− 2 +3 δij +∂� +uk +∂xk +� +− 2 +3ρkδij +(5) +where, µeff is the effective dynamic viscosity of fluid which is the summation of molecular and turbulent +dynamic viscosity of fluid i.e (µ + µt) and the molecular viscosity of gases is given by Sutherland +µ = As T 3/2 +T + Ts +(6) +As and Ts are Sutherland’s constants and depend on the type of gas and it’s molecules, and µt is the turbulent +viscosity which is calculated as, +µt = ρ Cµ +k2 +ϵ +(7) +where k is turbulent kinetic energy and ϵ is turbulent dissipation rate and Cµ is the closure constant and +these are modelled by two equation (k.ϵ) turbulence model and given as +∂ρκ +∂t + ∂ρ �ujκ +∂xj += +∂ +∂xi +�� +µ + µt +σκ +� ∂κ +∂xi +� ++ Pκ − ρϵ +(8) +where, k = 1 +2 +�3 +i=1 +ρu′′ +i u′′ +i +ρ +is the turbulent kinetic energy, Pk is the production of kinetic energy. +∂ρϵ +∂t + ∂ρ �ujϵ +∂xj += +∂ +∂xi +�� +µ + µt +σϵ +� ∂ϵ +∂xi +� ++ Cϵ1 +ϵ +κPκ − Cϵ2ρϵ2 +κ Pκ +(9) +where, ϵ = ν +� +∂u′′ +i ∂u′′ +i +∂xjxj +is the turbulent disspation rate and the value of closure constants are as below. Cµ = +0.09, σk = 1, sigmaϵ = 1.3, Cϵ1 = 1.44, C2 = 1.92 The pressure is calculated from equation of state for ideal +gas law as, +p = ρR �T +(10) +where, R is universal gas constant and T is temperature. The distribution of species is calculated by species +transport equation as below +∂ρi �Yi +∂t ++ ∂ρi �ui �Yi +∂xi += +∂ +∂xi +� +−ρµeff +∂ �Yi +∂xi +� +(11) +where, Yi is species mass-fraction and is given as, +Yi = ρi +ρ +(12) +8 + +The distribution of temperature field is calculated from the energy equation as below +∂ρ �E +∂t ++ ∂ρ �uj �E +∂xj ++ ∂ �ujp +∂xj += − ∂ �qj +∂xj ++ ∂ �uj � +τij +∂xj +(13) +where, E is the total energy which includes internal energy e, kinetic energy K and turbulent kinetic energy +k. The heat flux is defined as, +qj = −cpµeff +Pr +∂T +∂xi ++ �qr +(14) +cp depends on temperature and are taken from JANAF table of thermodynamics and given as below, +cp = R((((a4T + a3)T + a2)T + a1)T + a0) +(15) +a0, a1, a2, a3, a4 are constants of polynomial, +qr = +� ∞ +0 +� +4π +Iη(ˆs) |ˆn · ˆs| dΩ dη +(16) +where qr is the radiative heat flux which can be calculated on the wall, ˆn is the surface normal vector, +∂qr/∂xj is the divergence of radiative heat flux and can be calculated as, +∇ · q = +� ∞ +0 +κη +� +4πIbη − +� +4π +Iη dη +� +dη +or +∇ · q = +� ∞ +0 +κη (4πIbη − Gη) dη +(17) +where η is the wavenumber, Ibη is the Planck function and κη is the spectral absorption coefficient, Gη is +spectral irradiation, Iη(ˆs) is the intensity field which is obtained by solving the radiative transfer equation +(RTE) as explained in the subsequent paragraph. The above equations are subject to boundary conditions +as given in table 1. 
+The intensity field in equation 17 is obtained by solving the spectral radiative transfer equation (s-RTE) +for absorbing emitting (not scattering) medium as, +dIη +ds = κηIbη − κηIη +(18) +9 + +the above equation is subjected to boundary condition, +Iη(rw, ˆs) = ϵwηIbη(rw) + 1 − ϵwη +π +� +ˆn·ˆs>0 +Iη(rw, ˆs) |ˆn · ˆs| dΩ +(ˆn · ˆs < 0) +(19) +where, ϵwη is the spectral wall emissivity, Iη is the spectral intensity along ˆsi, Ibη is the Planck function, κη +is the spectral absorption coefficient, η is the wavenumber, and Ω is the solid angle. The length scale of the +current problem is very small, i.e., the optical length τ = κηL << 1, this means that the absorptivity of +the medium is far less than 1, therefore, the most of the radiation energy will escape the medium without +getting absorbed. Thus, the radiative source term (Eq. 17) +� ∞ +0 +κη4πIbηdη << +� ∞ +0 +κηGηdη +The radiative source term Eq. 17 becomes +∇ · q = +� ∞ +0 +κη4πIbηdη +� ∞ +0 +Ibηdη +� ∞ +0 +Ibηdη = 4κpσT 4 +where, κp is the Planck mean absorption coefficient. Therefore, the solution for the present case can be +Table 1: Boundary conditions for plume with thermal radiation simulation +Fields +Inlet +Outlet +Wall +Pressure (p) +totalPressure +Po = P + 0.5 ρ U 2 +Po = 7.11 bar +fixedValue +P=1 atm +zeroGradient +∇P = 0 +Velocity (U) +pressureInletOutletVelocity +Po = P + 0.5 ρ U 2 +inflow: U = (0,0,0) +outflow: ∇U = 0 +inletOutlet +inflow: U = (0,0,0) +outflow: ∇U = 0 +noSlip +U = (0,0,0) +Temperature (T) +fixedValue T = 2000 K +zeroGradient +∇T = 0 +qc + qr = 0 [23] +Species (x) +fixedValue x = 1 +for pure H2O plume +zeroGradient +∇x = 0 +zeroGradient +∇x = 0 +10 + +obtained by Planck Mean absorption coefficient based radiation property model. Thus, the RTE becomes, +dIp +ds = κp · (Ib − Ip) , +(20) +with boundary conditions, +Ip = ϵwIb + 1 − ϵw +π +� +ˆn·ˆs>0 +Ip |ˆn · ˆs| dΩ +(ˆn · ˆs < 0) +(21) +The Planck mean absorption coefficients are calculated for the range of thermodynamic states of gases in +the certain intervals as mentioned in ([18]) and stored in the form of lookup table. Furthermore, interpolation +techniques are employed to calculate the absorption coefficient which are not available in the lookup table. +The radiative heat transfer, work done due to viscous forces and species transport models have been +added into the existing application ”sonicFOAM” of the OpenFOAM and named as ”radSonicFOAM”. The +algorithms of the new application is described below and has been extensively verified and validated as +explained in the subsequent section and finally, has been used for the estimating the thermal load on the +nozzle base plate. +2.2. Numerical Procedure and solution algorithm for solving plume flow with radiation +The above mass, momentum, species, energy and radiation transfer equation are discretized using finite +volume method [24]. Further second order upwind scheme is used for the face value interpolation and final set +of algebraic equation is solved iteratively, by the SIMPLE algorithm till the residual for mass, momentum, +species, energy and radiation reaches to 10−5 level. The algorithm of above solution method is stated below, +1. Initialize pressure, velocity, species and temperature field. +2. Solve mass, momentum, species transport and energy equations without radiation till convergence. +3. Using converged field, initialize intensity field. +4. 
Calculate Planck mean absorption coefficient from the converged field of temperature, pressure and +mole-fraction of species using the Planck mean look-up table and solve RTE till convergence. +5. Compute divergence of radiative heat flux. +6. Update the temperature field with radiation sink term. +11 + +7. Repeat 2 to 6 until all the fields reach at steady state. furthermore, the flow diagram of the above +algorithm is shown in fig 2. +3. Verification and validation studies +The above mathematical modelling and solution algorithm are verified in three steps +• The calculated radiative properties are verified. +• The incompressible flow solution is verified with the published result. +• The radiative heat flux on the base plate is verified from the assumed shape of the plume in the sections +below. +3.1. Verification of Planck mean absorption coefficient of pure H2O and CO2 +The Planck mean absorption coefficients obtained for H2O and CO2 for various temperatures from +HITEMP-2010 using in-house C++ code [25, 26, 27], match with good agreement from Chu et al. [28]as +in Figure 3. +The Planck mean absorption coefficient of H2O decreases exponentially with increase in +temperature, whereas it first increases up to a temperature of 750 K then decreases till 2000 K for CO2. +The Planck mean absorption coefficient of H2O is higher than CO2 at lower temperatures, however, this is +opposite for higher temperature. This difference, decreases with increase in temperatures of compressible +flow. +3.2. Validation of compressible flow field +Darwish et al. [1] have designed a convergent divergent (C-D) nozzle using one dimensional flow isentropic +relations for perfect expansion conditions for air. The designed C-D nozzle has an exit diameter of 2.5 mm +and throat diameter of 1.98 mm, thus the area ratio Ar = 1.5942. The schematic diagram of C-D nozzle +with buffer section where flow eminates is shown in Fig. 1. They simulated the flow using OpenFOAM for +axisymmetric geometry for this nozzle along with the buffer zone. They further performed experiments to +visualize the flow using shadow-graphic technique. In the present study, we will be using the same nozzle +to validate pressure based compressible flow application ”sonicFOAM”. The air is allowed to expand from +7.1 atm pressure and 288 K to a quiescent medium at 1 atm pressure. The boundary conditions used for +this case is same as given in Table 1 except the temperature at the inlet is 288 K and the walls are at +zeroGradient (∇ · T = 0) boundary condition for temperature. +The flow is simulated for axisymmetric +12 + +Figure 2: Flow chart for the solution of high temperature and pressure plume flow with radiation +13 + +Start +Converged p,T,u,x without radiation +Time loop +Initialize intensity field +Obtain absorption coefficient from look-up +table and solve RTE to obtain V.q +No +Solve mass, momentum, species and energy +equation with V.q to obtain T +Converged ? +t-t+△t +Yes +Reached steady +state ? +No +Yes +EndFigure 3: Variation of Planck mean absorption coefficient of pure H2O and CO2 with different temperature at 1 bar pressure +geometry by creating a wedge of angle θ = 2.5o of unit cell in θ direction. It contains 38,400 cells and the +distance of first cell center from the wall is maintained at y+ ≈ 30. The standard k − ϵ model has been +used to model turbulence. Pressure-implicit split algorithm (PISO) is used to solve the governing flow and +energy equations. 
3.2. Validation of the compressible flow field

Darwish et al. [1] designed a convergent-divergent (C-D) nozzle using one-dimensional isentropic flow relations for perfect expansion of air. The designed C-D nozzle has an exit diameter of 2.5 mm and a throat diameter of 1.98 mm, giving an area ratio Ar = 1.5942. The schematic diagram of the C-D nozzle, together with the buffer section into which the flow emanates, is shown in Fig. 1. They simulated the flow using OpenFOAM on an axisymmetric geometry of this nozzle along with the buffer zone, and further performed experiments to visualize the flow using a shadowgraph technique. In the present study the same nozzle is used to validate the pressure-based compressible flow application "sonicFoam". Air is allowed to expand from 7.1 atm pressure and 288 K to a quiescent medium at 1 atm pressure. The boundary conditions are the same as those given in Table 1, except that the inlet temperature is 288 K and the walls use a zeroGradient (∇T = 0) boundary condition for temperature. The flow is simulated on an axisymmetric geometry by creating a wedge of angle θ = 2.5° with a single cell in the θ direction. The mesh contains 38,400 cells, and the distance of the first cell centre from the wall is maintained at y⁺ ≈ 30. The standard k-ϵ model is used to model turbulence, and the pressure-implicit with splitting of operators (PISO) algorithm is used to solve the governing flow and energy equations. The thermophysical and transport properties of air are taken as constant: cp = 1005 J/(kg K), γ = 1.4, µ = 1.789 × 10⁻⁵ Pa·s and Pr = 0.7. The time step used for the present simulation is 10⁻⁸ s, and the simulation has been run for 7 ms. The pressure and Mach number variations along the nozzle centerline, together with the results reported by Darwish et al. [1], are plotted in Figures 4 and 5, respectively. The present results are in good agreement with the literature. There are no shocks or sudden discontinuities inside the nozzle, as the flow is perfectly expanded within it. Since the nozzle is designed with 1D isentropic calculations while the present simulations are performed for the 2D axisymmetric case, there is some deviation from 1D isentropic flow; small expansion and compression waves are formed, creating the small diamond pattern visible in the pressure and Mach number profiles along the axis of the geometry.

Figure 4: Variation of pressure along the axis of the geometry.
Figure 5: Variation of Mach number along the axis of the geometry.

3.3. Verification of rocket base plate heating with an assumed plume shape

The axisymmetric approximation for the RTE has been tested on a rocket base plate heating problem with a fixed plume shape. The plume is assumed to be conical with a half cone angle of 15° and a non-dimensional length Z/R = 50, as shown in Figure 6. The temperature of the plume Tp is uniform. The environment is assumed to be cold and non-participating, i.e., κ = 0, while the absorption coefficient of the plume is κ = 0.5 m⁻¹.

Figure 7 shows the radiative heat flux at the base plate from the exhaust plume obtained by both axisymmetric and three-dimensional calculations. The result obtained from the 3D simulation is in good agreement with the results published by Baek and Kim [6], whereas the axisymmetric solution of the radiative transfer equation is very far from the published result. This would require a reformulation of the axisymmetric approximation of radiative heat transfer in OpenFOAM; therefore, a three-dimensional geometry has been used for the further simulations, as shown in Figure 8a.

Figure 6: Geometry of the conical plume.
Figure 7: Variation of the non-dimensional radiative heat flux at the base plate from the assumed plume shape, by axisymmetric and 3D RTE solutions.
Figure 8: (a) Three-dimensional geometry and meshing for the simulation of plumes with radiation; (b) cross-sectional view of the three-dimensional geometry.
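For reference, the base plate flux in these verification cases follows directly from Eq. (16): once the RTE has been solved with a discrete-ordinates (fvDOM-type) method, the incident radiative flux on a wall face is a quadrature sum of the incoming intensities. The C++ sketch below illustrates that sum; the ordinate directions, weights and intensities are placeholders standing in for the actual angular quadrature and RTE solution, not values taken from any specific OpenFOAM class.

#include <array>
#include <cstdio>
#include <vector>

struct Ordinate {
    std::array<double, 3> s;   // unit direction of the ordinate
    double w;                  // quadrature weight [sr]
    double I;                  // intensity along s at the wall face [W/(m^2 sr)]
};

// Incident radiative flux on a face whose unit normal n points into the gas:
// q_inc = sum over incoming directions (s.n < 0) of w_i * I_i * |s_i . n|   (discrete form of Eq. 16)
double incidentFlux(const std::vector<Ordinate>& ords, const std::array<double, 3>& n) {
    double q = 0.0;
    for (const auto& o : ords) {
        const double sn = o.s[0] * n[0] + o.s[1] * n[1] + o.s[2] * n[2];
        if (sn < 0.0) q += o.w * o.I * (-sn);
    }
    return q;                  // [W/m^2]
}

int main() {
    // Two placeholder ordinates directed towards a base plate whose normal is +z.
    std::vector<Ordinate> ords = {
        {{0.0, 0.0, -1.0},   1.5, 200.0},
        {{0.5, 0.0, -0.866}, 1.5, 150.0},
    };
    std::printf("q_inc = %.1f W/m^2\n", incidentFlux(ords, {0.0, 0.0, 1.0}));
    return 0;
}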
4. Results and discussion

The heating of the rocket base plate by thermal radiation from plumes of three different compositions, namely a pure H2O plume, a pure CO2 plume and a 50%-50% mixture of H2O and CO2, is studied numerically with OpenFOAM, an open-source CFD package. The present simulations are carried out on a full 3D geometry with the pressure-based compressible flow application "radSonicFOAM". It has additional features over the existing sonicFoam, namely the work done by viscous forces in the energy equation, a species transport equation, and emission/absorption due to gaseous radiation. The Planck mean radiative heat transfer model, with a multidimensional linear interpolation technique for the properties, is also incorporated to perform the radiation heat transfer calculations, the optically thin approximation being valid here. The thermal loads on the rocket base plate from the exhaust plumes of the three different compositions, i.e., the pure H2O plume, the pure CO2 plume and the 50%-50% H2O-CO2 mixture plume, are presented in the subsequent sections.

4.1. Pure H2O plume

A pure H2O plume is formed by the combustion of pure H2 with the liquid oxidizer LOX. The resulting product has an H2O mole fraction of x = 1 and emanates from the nozzle in the form of a plume. Initially the medium is filled with N2, and the H2O expands from 7.11 bar and 2000 K to a quiescent medium at 1 atm and 288 K.

The pressure remains constant in the convergent part of the nozzle but decreases sharply through the throat and the divergent part of the nozzle, as shown in Figure 10a. The nozzle exit pressure for the H2O plume, about 1.4 bar, is slightly higher than the pressure of the quiescent medium, which means the flow is underexpanded [29]. Due to this underexpansion, a series of strong expansion and compression waves (oblique shocks) evolves from the lip of the nozzle as the pressure tries to adjust itself to the medium pressure. The shock that evolves from the lip of the nozzle has the shape of a barrel, so it is called a "barrel shock", and a Mach disc appears after the shock, formed due to singular reflection. The pressure variation in the divergent part of the nozzle drives the temperature reduction shown in Figure 10b, and a similar effect of the pressure variation is seen on the temperature in the plume as well. Thus, the temperature variation in the divergent part of the nozzle and in the plume enables the heat transfer mechanism, whereas no such mechanism is active in the convergent part of the nozzle because the temperature there is uniform. Physical quantities such as pressure, temperature and velocity (or Mach number) vary rapidly across the shocks. The shock pattern has the form of a diamond, also known as the diamond flow structure. The pressure varies between 1.4 bar and 0.58 bar across the shock, as seen in Figure 10a. Similarly, the temperature also varies sharply, by up to 300 K in the region from 23 mm to 25 mm, as can be seen from the temperature profile along the axis in Fig. 10b. The temperature first decreases due to the expansion of the gases and then increases through the compression waves, and this pattern continues until the pressure comes into equilibrium with the buffer zone pressure. After 40 mm the flow stabilizes, as the pressure of the fluid there becomes the same as the medium pressure.
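The underexpanded exit state discussed above can be cross-checked with a simple one-dimensional isentropic estimate: for a given area ratio the supersonic exit Mach number follows from the area-Mach relation, and the exit pressure from the stagnation pressure ratio. The short C++ sketch below does this under the assumption of a constant specific-heat ratio; the value of γ used for the hot steam is itself an assumption, and the result is only an ideal-gas, inviscid estimate, so it will not reproduce the CFD exit pressure exactly.

#include <cmath>
#include <cstdio>

// A/A* as a function of Mach number for a perfect gas with ratio of specific heats g.
double areaRatio(double M, double g) {
    const double t = 1.0 + 0.5 * (g - 1.0) * M * M;
    return std::pow(2.0 * t / (g + 1.0), (g + 1.0) / (2.0 * (g - 1.0))) / M;
}

// Supersonic root of areaRatio(M) = Ar, found by bisection (areaRatio increases with M for M > 1).
double supersonicMach(double Ar, double g) {
    double lo = 1.0 + 1e-6, hi = 10.0;
    for (int i = 0; i < 100; ++i) {
        const double mid = 0.5 * (lo + hi);
        (areaRatio(mid, g) < Ar ? lo : hi) = mid;
    }
    return 0.5 * (lo + hi);
}

int main() {
    const double Ar = 1.5942;       // area ratio of the present nozzle
    const double p0 = 7.11e5;       // stagnation pressure [Pa]
    for (double g : {1.4, 1.25}) {  // 1.4: cold air; 1.25: an assumed representative value for hot steam
        const double M  = supersonicMach(Ar, g);
        const double pe = p0 / std::pow(1.0 + 0.5 * (g - 1.0) * M * M, g / (g - 1.0));
        std::printf("gamma = %.2f : M_exit = %.2f, p_exit = %.2f bar\n", g, M, pe / 1e5);
    }
    return 0;
}

With γ = 1.4 this estimate recovers the near-perfect expansion of the air validation case, while a lower γ representative of the hot combustion products gives an exit pressure above ambient, qualitatively consistent with the underexpanded behaviour described above.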
The trend is opposite for the Mach number: as the gas expands, the flow velocity increases, and the maximum Mach number achieved in this case is 2.25. The Mach number contour and its profile along the centerline are shown in Figs. 9c and 10c, respectively. In the near-field region of the plume, outside the inviscid core, a mixing layer forms where viscous effects are felt; the primary species (H2O) starts mixing with the atmospheric species (N2) and forms a shear layer. The region just outside the nozzle where the species start mixing is called the entrainment region of the plume. Moving downstream, the mixing layer widens, H2O being the lighter molecule (molecular weight 18), as seen in Fig. 9d. In the far-field region, i.e., the region after the shocks, the species mix completely up to the centerline, as can be seen in the H2O and N2 profiles along the centerline. Fig. 10d shows the profiles of H2O and N2 along the axis, and the contours of H2O and N2 are shown in Figs. 9d and 9e, respectively.

The pressure, temperature and H2O concentration fields constitute the thermodynamic state of the H2O vapour, from which the Planck mean absorption coefficient of H2O is obtained through the lookup table; its contour is shown in Figure 11a. It has a very high value in the convergent portion of the nozzle due to the very high pressure, decreases as the pressure drops in the divergent section, and is reduced further in the plume. The absorption coefficient is zero where only N2 is present. The plume being of very small thickness, reabsorption does not occur and the major emission comes from the core of the plume; emission and absorption nearly balance in the shear layer, so the divergence of the radiative heat flux is almost zero both in the shear layer and in the regions of zero absorption coefficient, as shown in Figure 11b. Note that the divergence of the radiative flux ranges from negative to positive values: a positive divergence represents a radiative sink (net emission), while a negative divergence represents a radiative source (net absorption). Thus, radiation heats the gas inside the divergent part of the nozzle while it cools the plume, and this energy is transported by the radiative mode of heat transfer to other regions without any intermediate change.

The high-temperature plume, after emanating from the nozzle, diffuses and develops a very high flux and temperature in a very narrow region around the lip of the nozzle on the base plate. Barring this region, the base plate receives the radiative energy emanating from the shear layer of the plume. The radiative heat flux on the base plate, barring the small region near the lip of the nozzle, is shown in Fig. 12a. The maximum value of the radiative heat flux is 1300 W/m², and it decreases along the radial direction as the view factor of the plume decreases. The temperature developed due to this radiative flux is shown in Fig. 12b; the maximum value attained by the base plate due to the radiative energy is 308 K, and it decreases along the radius in a similar manner to the radiative flux.

Figure 11: Contours of (a) absorption coefficient and (b) divergence of the radiative heat flux for the pure H2O plume.
Figure 12: Profiles of (a) radiative heat flux and (b) temperature along the radius of the base plate for the pure H2O plume.
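The sign convention used above can be made explicit with the gray, Planck-mean form of Eq. (17). The following small C++ sketch evaluates the divergence of the radiative heat flux for a cell; the numerical values of κ_p, T and the incident irradiation G are arbitrary illustrative inputs, not results from the present simulations.

#include <cmath>
#include <cstdio>

// Gray, Planck-mean form of Eq. (17): div(q_r) = kappa_p * (4*sigma*T^4 - G).
// Positive  -> net emission   -> radiative sink   (the gas cools, e.g. in the plume).
// Negative  -> net absorption -> radiative source (the gas heats, e.g. expansion-cooled gas
//              in the divergent section irradiated by hotter upstream gas).
double divQ(double kappaP, double T, double G) {
    const double sigma = 5.670374419e-8;   // Stefan-Boltzmann constant [W/(m^2 K^4)]
    return kappaP * (4.0 * sigma * std::pow(T, 4) - G);
}

int main() {
    // Arbitrary illustrative states (not values from the paper):
    const double plume  = divQ(0.5, 1800.0, 1.0e5);   // hot, weakly irradiated plume gas
    const double nozzle = divQ(5.0, 1500.0, 1.5e6);   // cooler gas irradiated by hotter upstream gas
    std::printf("plume : div q = %+.3e W/m^3 (%s)\n", plume,  plume  > 0 ? "cooling" : "heating");
    std::printf("nozzle: div q = %+.3e W/m^3 (%s)\n", nozzle, nozzle > 0 ? "cooling" : "heating");
    return 0;
}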
4.2. Pure CO2 plume

Although the generation of a pure CO2 plume is not very realistic, the simulation has been performed for a pure CO2 plume for theoretical understanding. The simulations are performed by supplying pure CO2 (x = 1) at the inlet of the nozzle, with the remaining conditions kept the same as for the H2O plume. This is also a case of underexpansion, so the pressure at the lip of the nozzle varies from 1.4 bar to 0.5 bar across the shocks, and a Mach disc forms at the end of the first shock. The contour of pressure and its distribution along the centerline are shown in Figs. 13a and 14a, respectively. The temperature drop across the shock in the CO2 plume is smaller than for H2O; however, there is a larger drop in temperature towards the end of the plume.

The temperature contour is shown in Fig. 13b. The variation of temperature across the first shock is less drastic than for the H2O plume; this plume also cools faster than the H2O plume, its minimum temperature being 1200 K at the end of the plume compared with 1350 K for the H2O plume.

Figure 9: Contours of (a) pressure, (b) temperature, (c) Mach number, (d) H2O and (e) N2 for the pure H2O plume.
Figure 10: Profiles of (a) pressure, (b) temperature, (c) Mach number and (d) species along the centerline for the pure H2O plume.
Figure 13: Contours of (a) pressure, (b) temperature, (c) Mach number, (d) CO2 and (e) N2 for the pure CO2 plume.
Figure 14: Profiles of (a) pressure, (b) temperature, (c) Mach number and (d) species for the pure CO2 plume.
Figure 15: Contours of (a) absorption coefficient and (b) divergence of the radiative heat flux for the pure CO2 plume.
Figure 16: Profiles of (a) radiative heat flux and (b) temperature along the radius of the base plate for the pure CO2 plume.
Figure 17: Profiles of (a) radiative heat flux and (b) temperature along the radius of the base plate for the 50-50% CO2-H2O mixture plume.

The Mach number contour and its distribution along the centerline are shown in Figs. 13c and 14c, respectively. The diffusion of CO2 into N2 is less than that of H2O because of the higher molecular weight of CO2 (44) compared with H2O (18), as shown in Fig. 14d. The contours of the CO2 and N2 mole fractions are shown in Figs. 13d and 13e, respectively.

The absorption coefficient distribution obtained with the Planck mean absorption coefficient for the CO2 plume is shown in Fig. 15a. Its value is almost zero everywhere except in the core of the plume and in the shear layer.
As the absorption coefficient of CO2 in the shear layer is higher than that of the H2O plume, the radiative heat flux on the rocket base plate is also higher, around 4000 W/m², as shown in Fig. 16a. The corresponding temperature distribution on the base plate is shown in Fig. 16b, with a maximum value of 323 K, barring the diffusion region.

4.3. Mixture plume (50% H2O and 50% CO2)

The combustion of a hydrocarbon fuel with the liquid oxidizer (LOX) gives a 50-50% mixture of CO2 and H2O. Thus, for the present problem, a 50-50% mixture of CO2 and H2O is supplied at the inlet of the nozzle, with the other conditions kept the same as in the previous cases. This is also a case of an underexpanded plume. The temperature variation along the centerline up to the end of the buffer section is roughly the average of those of the pure CO2 and pure H2O plumes.

The radiative transfer calculations are performed to determine the heat flux on the base plate from the CO2-H2O mixture plume. The maximum radiative heat flux on the base plate is 2300 W/m² (Fig. 17a), and it decays with the radius of the base plate. The corresponding temperature profile on the base plate is shown in Fig. 17b. It is noted that the flux and temperature profiles for the CO2 and mixture plumes decay exponentially with radius, while the decay is almost linear for H2O. This is because the higher diffusion of H2O causes more spreading of H2O, and this emission from H2O has a higher view factor, which is not the case for the CO2 and mixture plumes.

5. Conclusions

The thermal load on the nozzle base plate from the exhaust plume has been calculated in OpenFOAM. The ability of the pressure-based compressible flow application "sonicFoam" to capture the flow field was first tested for air expanding in a convergent-divergent nozzle. The stagnation pressure and temperature at the inlet of the nozzle are 7.11 bar and 288 K, due to which the flow expands and reaches Mach 2.1 at the exit of the nozzle. The resulting pressure and Mach number variations along the centerline match well with the standard published results.

The same nozzle is then used with an elevated stagnation temperature of 2000 K and the same inlet pressure to estimate the heat load on the base plate for three different plumes, namely the pure H2O plume, the pure CO2 plume and the mixture plume. The "sonicFoam" application is modified by incorporating the work done by viscous forces and a species transport equation, and is coupled with the RTE solver fvDOM along with the Planck mean absorption-emission model; the resulting application is named "radSonicFOAM". All three plumes exit the nozzle under underexpanded flow conditions, where the exit pressure is higher than the back pressure. Expansion waves start from the lip of the nozzle, due to which the temperature decreases as the flow exits the nozzle and the Mach number increases to a maximum value of 2.25.

The maximum heat load due to thermal radiation on the base plate in the present study is from the pure CO2 plume, i.e., 4000 W/m², owing to the high value of its absorption coefficient, barring the diffusion zone.
This flux heats up the base plate and its temperature rises up to 323 K. It is followed by the mixture plume, which receives a maximum radiative heat flux of 2300 W/m² with a corresponding temperature rise to 312 K. For the pure H2O plume the heat flux is the lowest, i.e., 1300 W/m², with a temperature rise to 308 K. The variation of the flux differs from plume to plume, mostly because of the differences in the absorption coefficients of the gases. Their molecular weights also differ, which leads to differences in the flow fields and hence to the different shapes of the flux and temperature variations on the nozzle base plate. Due to the small length scale, the present case falls in the optically thin regime; thus the Planck mean absorption model provides satisfactory results. However, the Planck mean absorption model may not be adequate for other cases with larger length scales, for which full-spectrum radiative property models are needed, with properties covering all the thermodynamic states existing in the plume. Furthermore, solid fuels emanate particles which contribute most of the radiative thermal load on the nozzle base plate; therefore, the current radiation heat transfer capability needs to be further enhanced by including a scattering model.

References

[1] M. Darwish, L. Orazi, D. Angeli, Simulation and analysis of the jet flow patterns from supersonic nozzles of laser cutting using OpenFOAM, The International Journal of Advanced Manufacturing Technology 102 (9) (2019) 3229–3242.
[2] F. Simmons, Rocket Exhaust Plume Phenomenology, American Institute of Aeronautics and Astronautics, Inc., 2000.
[3] M. F. Modest, Radiative Heat Transfer, Academic Press, 2013.
[4] C. Tien, M. Abu-Romia, A method of calculating rocket plume radiation to the base region, Journal of Spacecraft and Rockets 1 (4) (1964) 433–435.
[5] H. Nelson, Backward Monte Carlo modeling for rocket plume base heating, Journal of Thermophysics and Heat Transfer 6 (3) (1992) 556–558.
[6] S. W. Baek, M. Y. Kim, Analysis of radiative heating of a rocket plume base with the finite-volume method, International Journal of Heat and Mass Transfer 40 (7) (1997) 1501–1508.
[7] H.-P. Tan, Y. Shuai, S.-K. Dong, Analysis of rocket plume base heating by using backward Monte-Carlo method, Journal of Thermophysics and Heat Transfer 19 (1) (2005) 125–127.
[8] J. Everson, H. Nelson, Rocket plume radiation base heating by reverse Monte Carlo simulation, Journal of Thermophysics and Heat Transfer 7 (4) (1993) 717–723.
[9] S. Sunil Kumar, K. Ramamurthy, Prediction of radiation from plumes, considering spatial temperature variations, Heat Transfer Engineering 21 (1) (2000) 55–73.
[10] B. Gu, M. Y. Kim, S. W. Baek, Analysis of the IR signature and radiative base heating from a supersonic solid rocket exhaust plume, International Journal of Aeronautical and Space Sciences 20 (2) (2019) 423–432.
[11] L. S. Rothman, I. E. Gordon, A. Barbe, D. C. Benner, P. F. Bernath, M. Birk, V. Boudon, L. R. Brown, A. Campargue, J.-P. Champion, et al., The HITRAN 2008 molecular spectroscopic database, Journal of Quantitative Spectroscopy and Radiative Transfer 110 (9-10) (2009) 533–572.
[12] S. Tashkun, V. Perevalov, CDSD-4000: High-resolution, high-temperature carbon dioxide spectroscopic databank, Journal of Quantitative Spectroscopy and Radiative Transfer 112 (9) (2011) 1403–1410.
[13] L. Rothman, I. Gordon, R. Barber, H. Dothe, R. Gamache, A. Goldman, V. Perevalov, S. Tashkun, J. Tennyson, HITEMP, the high-temperature molecular spectroscopic database, Journal of Quantitative Spectroscopy and Radiative Transfer 111 (15) (2010) 2139–2150.
[14] M. F. Modest, H. Zhang, The full-spectrum correlated-k distribution for thermal radiation from molecular gas-particulate mixtures, Journal of Heat Transfer 124 (1) (2002) 30–38.
[15] C. Wang, W. Ge, M. F. Modest, B. He, A full-spectrum k-distribution look-up table for radiative transfer in nonhomogeneous gaseous media, Journal of Quantitative Spectroscopy and Radiative Transfer 168 (2016) 46–56.
[16] V. P. Solovjov, B. W. Webb, SLW modeling of radiative transfer in multicomponent gas mixtures, Journal of Quantitative Spectroscopy and Radiative Transfer 65 (4) (2000) 655–672.
[17] S. Parvatikar, K. Khemani, P. Kumar, Benchmark test cases for non-gray radiative heat transfer calculation using FSK look-up table, in: Journal of Physics: Conference Series, Vol. 2116, IOP Publishing, 2021, p. 012066.
[18] K. Khemani, S. Parvatikar, P. Kumar, Radiative heat transfer calculations using full spectrum k-distribution method for benchmark test cases, Sādhanā 48 (1) (2023) 1–18.
[19] K. Khemani, P. Kumar, Radiative heat transfer calculation for mixture of gases using full spectrum k-distribution method, in: Journal of Physics: Conference Series, Vol. 2116, IOP Publishing, 2021, p. 012065.
[20] OpenCFD, OpenFOAM - The Open Source CFD Toolbox - User's Guide, OpenCFD Ltd. (11 Apr. 2007).
[21] D. C. Wilcox, et al., Turbulence Modeling for CFD, Vol. 2, DCW Industries, La Canada, CA, 1998.
[22] T. F. Edgar, R. M. Felder, J. McKenna, R. W. Rousseau, S. I. Sandler, R. C. Seagrave, Bird, Stewart and Lightfoot: Transport Phenomena.
[23] G. Chanakya, P. Kumar, Investigation of thermal adiabatic boundary condition on semitransparent wall in combined radiation and natural convection, International Journal for Computational Methods in Engineering Science and Mechanics 23 (4) (2022) 349–366.
[24] P. Kumar, Radiative heat transfer in a participating gray medium and its interaction with fluid flow, Ph.D. thesis, Indian Institute of Technology Kanpur (January 2009).
[25] N. Bartwal, G. Chanakya, P. Kumar, Calculation of non-gray radiation transmissivity, absorptivity and absorption coefficient of water vapour from HITEMP-2010 database at high temperature, in: 6th Asian Symposium on Computational Heat Transfer and Fluid Flow, IITM, Chennai, India, 2017.
[26] N. Bartwal, P. Kumar, Calculation of non-gray radiation transmissivity, absorptivity of carbon-dioxide from HITEMP-2010 database at high temperature, in: 24th National & 2nd International ISHMT-ASTFE Heat and Mass Transfer Conference, BITS-Pilani, Hyderabad, India, 2017.
[27] N. Bartwal, P. Kumar, Calculation of non-gray radiation absorptivity and absorption coefficient of mixture of gases from HITEMP-2010 database, in: International Heat Transfer Conference Digital Library, Begell House Inc., 2018.
[28] H. Chu, M. Gu, H. Zhou, F. Liu, Calculations of narrow-band transmissivities and the Planck mean absorption coefficients of real gases using line-by-line and statistical narrow-band models, Frontiers in Energy 8 (1) (2014) 41–48.
[29] E. Franquet, V. Perrier, S. Gibout, P. Bruel, Free underexpanded jets in a quiescent medium: A review, Progress in Aerospace Sciences 77 (2015) 25–53.
+29 + diff --git a/6dE4T4oBgHgl3EQfBwu-/content/tmp_files/load_file.txt b/6dE4T4oBgHgl3EQfBwu-/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..ef2a3f98b942f671de35619a7e5172c040257d86 --- /dev/null +++ b/6dE4T4oBgHgl3EQfBwu-/content/tmp_files/load_file.txt @@ -0,0 +1,724 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf,len=723 +page_content='Estimation of thermal load on the nozzle base plate from small plumes at high temperature Kamal Khemani1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Pradeep Kumar1*,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Ganesh Natarajan2 1 Numerical Experiment Laboratory (Radiation & Fluid Flow Physics) Indian Institute of Technology Mandi,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Himachal Pradesh,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' 175075,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' India 2 Discipline of Mechanical Engineering,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Indian Institute of Technology Palakkad,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Palakkad,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Kerala,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' 678557,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' India Abstract A numerical study is performed to estimate thermal load on the nozzle base plate,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' which is in the upstream direction to the flow,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' from three hot plumes of pure (CO2),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' (H2O) and 50-50 (%) composition of (CO2) and (H2O) expanding through a convergent-divergent (CD) nozzle in a quiescent medium at 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='1 bar pressure and 298K temperature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The base plate of the nozzle heats up due to thermal radiation, emitting from the hot gases in the form of plumes.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The spectral radiative properties of major participating gases such as (CO2), (H2O) are calculated from HITEMP-2010 database.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' A small CD nozzle which is designed for the perfect expansion of air by 1D calculation with nozzle throat diameter 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='98 mm and area ratio 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='5942, is considered as the design of nozzle for present study [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' All three plumes are in the under-expanded state for this CD nozzle and hence expands rapidly at supersonic speed as the plumes exit from the nozzle and forms a series of expansion and compression waves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The hot plumes emanating from the nozzle develop very high temperature in a small vicinity around the base plate, due to diffusion and develop very high temperature on the base plate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Barring this region, the maximum amount of radiative flux on base plate for these three plumes, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=', CO2 plume, mixture plume and H2O plume are 4000 W/m2, 2300 W/m2 and 1300 W/m2, respectively and the maximum temperature developed due to these corresponding fluxes are 323 K, 312 K and 308 K, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Keywords: Compressible flow, gas radiation, thermal load, underexpanded URL: pradeepkumar@iitmandi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='in (Pradeep Kumar1*) arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='04855v1 [physics.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='comp-ph] 12 Jan 2023 NOMENCLATURE English Symbols c1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' c2 First and second radiation constants cp Specific heat at constant pressure e Internal energy h Enthalpy k Thermal conductivity,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' turbulent kinetic energy ˆn Unit normal vector p Pressure q Heat flux s Direction vector t Time u Velocity x Cartesian coordinate coordinate Ar Area ratio Iη Spectral intensity Ibη Planck function R Universal gas constant Y Species mass-fraction Greek Symbols 2 βη Spectral extinction coefficient ϵ Emissivity,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' turbulent dissipation rate η Wavenumber κη Spectral absorption coefficient µ Dynamic viscosity ∇ · q Divergence of radiative heat flux Ω Solid angle φ Azimuthal angle Φ Scattering phase function ρ Density of fluid σsη Spectral scattering coefficient θ Polar angle τ Viscous stress tensor,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' transmissivity of gas,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' optical thickness Subscript b Blackbody c Conduction cv Convection eff Effective η Spectral g Gas k Turbulent kinetic energy r Radiation t Turbulent,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' total w Wall 3 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Introduction The exhaust plume from the nozzle is a product of high temperature and high pressure gases exiting from the combustion chamber.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' These gases expand rapidly in the convergent divergent (CD) nozzle at supersonic velocities because of the conversion of thermal energy into kinetic energy, which generates the thrust to lift off the rocket.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The structure of the plume is non uniform, containing different flow regimes and supersonic shock patterns.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' It appear as bright luminous flame which emits radiation in the visible, ultraviolet (UV) and infrared (IR) parts of the electromagnetic spectrum [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The major part of plume radiation comes from participating gases like CO2, CO and H2O which show strong emission of thermal radiation in the infrared region of the spectrum [3].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' This heats up the base plate of the rocket and becomes the source of tracking by enemies in the case of missiles, fighter jets and combat aircrafts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Tien and Abu-Romia [4] used analytical method to estimated the amount of radiative heat flux on the rocket base plate from exhaust CO2 and H2O gas plume with idealised physical models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' They evaluated apparent emissivity at base plate from semi infinite cylinder shape for H2O gas plume for a temperature of 2000oR, pressure 1 atm and CO2 gas plume for a temperature of 2500oR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Nelson [5] used backward Monte Carlo method to estimate radiative heat flux on rocket base plate from exhaust plume.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' They further studied the effect of cone angle of exhaust plume and scattering albedo on the base plate heating from plume.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The increase in cone angle increased the heat flux on the base plate whereas increase of albedo decreased the heat flux.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' However, increase in albedo increased the searchlight emission from plume.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Baek and Kim [6] calculated the heat load on the base plate from both exhaust plume and searchlight emission from the particles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' They used finite volume method to solve radiative transfer equation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Tan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' [7] conducted a study in which they changed the temperature distribution of plume from isothermal to non-isothermal and concluded that the thermal load on thebase plate reduced 2-3 times for non-isothermal plume.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' They also observed that by increasing optical thickness of medium the amount of radiative flux on the wall increased.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Everson and Nelson [8] developed reverse Monte Carlo method to predict base plate heating from plume due to radiation and found that, reverse Monte Carlo was computationally more efficient than forward Monte Carlo method.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' This was owing to the fact that only the rays that strikes the target point was only considered.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' For calculations they used band models for gas spectrum and Henyey-Greenstein function for particle scattering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' They performed reverse Monte Carlo calculations for four different cases which included pure scattering plume, gas only emission for main engine plume, solid rocket motor plume and a plume with non-uniform temperature which absorbs, emits and scatters, and finally found that majority of emission is due to alumina particles coming from the centre.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' While, H2O and Al2O2 emitted radiation from the 4 center of the plume and moreover major contribution of emission came from Al2O3 particles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Kumar and Ramamurthy [9] estimated radiative heat load on the rocket base plate using forward Monte-Carlo technique for gray conical plume with axial and radial temperature variations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' They found that the radiative heat flux changed drastically with the change in radial temperature profile also the amount of radiative heat flux decreased with the increase in altitude as plume cools down faster.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Similar arguments were given by Gu and Baek [10] as they examined radiative heat flux from WSGGM method for a solid rocket motor from which the thermal load was estimated by long plumes of 5 and 10 km.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Accurate modelling of heat transfer due to radiation is very necessary for safe and efficient designing of rocket.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Estimation of radiative properties of gases is crucial and the most important part in determining heat transfer due to radiation accurately.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The radiative properties of participating gases can be calculated using some of the most popular spectral database like High Resolution Transmission Spectroscopic Molecular Absorption database (HITRAN) [11], Carbon-Dioxide Spectroscopic Database (CDSD) [12], High Temperature spectroscopic absorption parameter (HITEMP) [13] etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The spectral absorption coefficients are highly erratic in nature containing millions of spectral lines which attain same value multiple times.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' This unnecessarily increases the computational cost required to solve the radiation transfer equation (RTE) as the line-by-line method considers calculation for each and every line on the spectrum and is therefore, mostly used only for benchmarking purposes [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Many methods are proposed to reduce the computation resource requirements such as Full spectrum scaled and correlated k-Distribution (FSSK/FSCK) [14], Lookup based Full spectrum K-Distribution [15], Spectral line weight sum of gray gases [16] etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The accuracy of the above methods is well demonstrated for uniform composition of gases [17, 18], however, the variation in composition of gaseous and their mixture poses another level of challenge and further modelling is required [19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' In order to use look up table based FSK method, some interpolation techniques should be adopted for the properties for current thermodynamic states of gases in the domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' It is evident from the above literature that only a few work is available to calculate the heat load on the rocket base plate, that to with fixed conical plume shape and radiative properties of gases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The general heat transfer applications like, combustion, rocket propulsion, gasification contain numerous thermodynamic states, thus it is useful to generate a database for absorption coefficient at different temperatures, pressures and mole-fractions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The present case is optically thin thus, the RTE is solved using the Planck mean absorption coefficient at different thermodynamic states, from look-up table.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The thermal load on the nozzle base plate has been calculated from the accurate solution of flow and temperature fields by solving complete set of governing equation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The radiative property is obtained 5 from the HITEMP-2010 database, stored in the form of lookup table for range of thermodynamic states of gases and utilized during the solution of radiative transfer equation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The thermodynamic states for which data is available can directly be used.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Further, the Planck mean absorption coefficient for unavailable thermodynamic states can easily be calculated by using multidimensional linear interpolation technique.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The fvDOM numerical method is used for solution of RTE coupled with fluid flow using a pressure based compressible flow application sonicRadFoam, modified from sonicFoam application of OpenFOAM [20].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Finally it includes the work done due to viscous forces, species transfer equation and RTE with Planck mean absorption-emission model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The manuscript is organised as section 2 describing the problem statement, and section 3 describing the mathematical models and governing differential equations followed by validation in section 4, results and discussions in section 5, and finally the present work is concluded in section 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Problem description The convergent-divergent (CD) nozzle has throat diameter and an area-ratio of 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='98 mm and 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='5942, respectively, and the length of convergent and divergent section is 7 mm and 14 mm, respectively as shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' 1 which also include the buffer zone for emanating the jet in the atmosphere.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The base plate is attached at the end and the fluid expands from a stagnation pressure and temperature of 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='11 bar and 2000 K, respectively, to a quiescent medium at the atmospheric condition of 1 atm pressure and 298K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The present CD nozzle designed for perfect expansion of air by one dimensional calculation, has been considered for the flow of three plumes whose constituents are pure CO2, pure water vapour and 50-50(%) CO2 and H2O from above pressure and temperature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Initially whole domain is filled with N2 gas at 1 atm pressure and 298 K temperature.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The following assumptions have been considered for in the present study.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Reynolds-averaged Navier-Stokes assumption is used to model turbulent flow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The participating medium only absorbs or emits the thermal radiation but does not scatters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Refractive index of medium and walls are equal to one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Turbulence radiation interaction is neglected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Constant turbulent Prandtl number assumption has been used in the present study: 6 Figure 1: Schematic diagram of geometry for the calculation of the thermal load on the nozzle base plate from the hot plume 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Governing equations The density and temperature fluctuations must be accounted for compressible flow of a fluid along with velocity and pressure fluctuations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' To account for these factors, the mass based averaging commonly known as Favre averaging [21, 22], is used to describe the flow and energy transfer for compressible turbulent fluids.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' which is defined as, �φ = ρφ ρ (1) where, ρ is the density of fluid.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' φ is a scalar and the averaging of density is defined below, ρ = 1 T � T 0 ρ dT (2) ∂ρ ∂t + ∂ρ �ui ∂xi = 0 (3) ∂ρ �ui ∂t + ∂ρ �ui �uj ∂xj = − ∂p ∂xi + ∂� τij ∂xj (4) 7 Outlet BasePlate ww 5 Wall 7mm Inlet Axis 14mm 7 mm 28mmwhere, � τij = µeff � ∂ �ui ∂xj + ∂ �uj ∂xi − 2 3 δij ∂� uk ∂xk � − 2 3ρkδij (5) where, µeff is the effective dynamic viscosity of fluid which is the summation of molecular and turbulent dynamic viscosity of fluid i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='e (µ + µt) and the molecular viscosity of gases is given by Sutherland µ = As T 3/2 T + Ts (6) As and Ts are Sutherland’s constants and depend on the type of gas and it’s molecules, and µt is the turbulent viscosity which is calculated as, µt = ρ Cµ k2 ϵ (7) where k is turbulent kinetic energy and ϵ is turbulent dissipation rate and Cµ is the closure constant and these are modelled by two equation (k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='ϵ) turbulence model and given as ∂ρκ ∂t + ∂ρ �ujκ ∂xj = ∂ ∂xi �� µ + µt σκ � ∂κ ∂xi � + Pκ − ρϵ (8) where, k = 1 2 �3 i=1 ρu′′ i u′′ i ρ is the turbulent kinetic energy, Pk is the production of kinetic energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' ∂ρϵ ∂t + ∂ρ �ujϵ ∂xj = ∂ ∂xi �� µ + µt σϵ � ∂ϵ ∂xi � + Cϵ1 ϵ κPκ − Cϵ2ρϵ2 κ Pκ (9) where, ϵ = ν � ∂u′′ i ∂u′′ i ∂xjxj is the turbulent disspation rate and the value of closure constants are as below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Cµ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='09, σk = 1, sigmaϵ = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='3, Cϵ1 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='44, C2 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='92 The pressure is calculated from equation of state for ideal gas law as, p = ρR �T (10) where, R is universal gas constant and T is temperature.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The distribution of species is calculated by species transport equation as below ∂ρi �Yi ∂t + ∂ρi �ui �Yi ∂xi = ∂ ∂xi � −ρµeff ∂ �Yi ∂xi � (11) where,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Yi is species mass-fraction and is given as,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Yi = ρi ρ (12) 8 The distribution of temperature field is calculated from the energy equation as below ∂ρ �E ∂t + ∂ρ �uj �E ∂xj + ∂ �ujp ∂xj = − ∂ �qj ∂xj + ∂ �uj � τij ∂xj (13) where,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' E is the total energy which includes internal energy e,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' kinetic energy K and turbulent kinetic energy k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' The heat flux is defined as,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' qj = −cpµeff Pr ∂T ∂xi + �qr (14) cp depends on temperature and are taken from JANAF table of thermodynamics and given as below,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' cp = R((((a4T + a3)T + a2)T + a1)T + a0) (15) a0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' a1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' a2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' a3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' a4 are constants of polynomial,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' qr = � ∞ 0 � 4π Iη(ˆs) |ˆn · ˆs| dΩ dη (16) where qr is the radiative heat flux which can be calculated on the wall,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' ˆn is the surface normal vector,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' ∂qr/∂xj is the divergence of radiative heat flux and can be calculated as,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' ∇ · q = � ∞ 0 κη � 4πIbη − � 4π Iη dη � dη or ∇ · q = � ∞ 0 κη (4πIbη − Gη) dη (17) where η is the wavenumber,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content=' Ibη is the Planck function and κη is the spectral absorption coefficient,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} 
The radiative heat flux is defined as

q_r = \int_0^{\infty}\int_{4\pi} I_\eta(\hat{s})\,|\hat{n}\cdot\hat{s}|\, d\Omega\, d\eta    (16)

where q_r is the radiative heat flux, which can be calculated at the wall, and \hat{n} is the surface normal vector. The divergence of the radiative heat flux, \partial q_r/\partial x_j, is calculated as

\nabla \cdot q = \int_0^{\infty}\kappa_\eta\left(4\pi I_{b\eta} - \int_{4\pi} I_\eta\, d\Omega\right) d\eta = \int_0^{\infty}\kappa_\eta\left(4\pi I_{b\eta} - G_\eta\right) d\eta    (17)

where \eta is the wavenumber, I_{b\eta} is the Planck function, \kappa_\eta is the spectral absorption coefficient, G_\eta is the spectral irradiation, and I_\eta(\hat{s}) is the intensity field, which is obtained by solving the radiative transfer equation (RTE) as explained in the subsequent paragraph. The above equations are subject to the boundary conditions given in Table 1.

The intensity field in Eq. (17) is obtained by solving the spectral radiative transfer equation (s-RTE) for an absorbing-emitting (not scattering) medium,

\frac{dI_\eta}{ds} = \kappa_\eta I_{b\eta} - \kappa_\eta I_\eta    (18)

The above equation is subject to the boundary condition

I_\eta(r_w,\hat{s}) = \epsilon_{w\eta} I_{b\eta}(r_w) + \frac{1-\epsilon_{w\eta}}{\pi}\int_{\hat{n}\cdot\hat{s}>0} I_\eta(r_w,\hat{s})\,|\hat{n}\cdot\hat{s}|\, d\Omega \qquad (\hat{n}\cdot\hat{s} < 0)    (19)

where \epsilon_{w\eta} is the spectral wall emissivity, I_\eta is the spectral intensity along \hat{s}_i, I_{b\eta} is the Planck function, \kappa_\eta is the spectral absorption coefficient, \eta is the wavenumber, and \Omega is the solid angle.
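Before the optically thin simplification introduced next, it may help to see how Eq. (18) behaves along a single line of sight. The sketch below marches a ray through a few cells with piecewise-constant properties, using the exact per-cell update I -> I_b + (I - I_b)exp(-kappa*ds); the cell values are invented, and this is only an illustration of the RTE itself, not the angular-discretization machinery actually used in OpenFOAM.

#include <cmath>
#include <cstdio>
#include <vector>

// One cell along the line of sight: absorption coefficient [1/m],
// temperature [K] and path length through the cell [m]. Values are invented.
struct Cell { double kappa; double T; double ds; };

double blackbodyIntensity(double T) {
    const double sigma = 5.670374419e-8;   // Stefan-Boltzmann constant
    const double pi = 3.14159265358979;
    return sigma * T * T * T * T / pi;     // gray Ib = sigma*T^4/pi
}

// Integrate dI/ds = kappa*(Ib - I), Eq. (18), with piecewise-constant
// properties: I -> Ib + (I - Ib)*exp(-kappa*ds) in each cell.
double marchRay(const std::vector<Cell>& path, double Iwall) {
    double I = Iwall;
    for (const Cell& c : path) {
        double Ib = blackbodyIntensity(c.T);
        I = Ib + (I - Ib) * std::exp(-c.kappa * c.ds);
    }
    return I;
}

int main() {
    // Cold transparent gas, hot absorbing-emitting plume core, cold gas again.
    std::vector<Cell> path = { {0.0, 300.0, 0.005}, {0.5, 1800.0, 0.003},
                               {0.5, 2000.0, 0.003}, {0.0, 300.0, 0.005} };
    std::printf("intensity arriving at the end of the ray: %.1f W/(m^2 sr)\n",
                marchRay(path, 0.0));
    return 0;
}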
The length scale of the current problem is very small, i.e., the optical thickness \tau = \kappa_\eta L \ll 1. This means that the absorptivity of the medium is far less than 1, and therefore most of the radiation energy escapes the medium without being absorbed. Thus, in the radiative source term of Eq. (17) the absorption part is negligible compared with the emission part,

\int_0^{\infty}\kappa_\eta G_\eta\, d\eta \ll \int_0^{\infty} 4\pi\kappa_\eta I_{b\eta}\, d\eta

and Eq. (17) reduces to

\nabla \cdot q \approx \int_0^{\infty} 4\pi\kappa_\eta I_{b\eta}\, d\eta = 4\pi I_b\,\frac{\int_0^{\infty}\kappa_\eta I_{b\eta}\, d\eta}{\int_0^{\infty} I_{b\eta}\, d\eta} = 4\kappa_p \sigma T^4

where \kappa_p is the Planck mean absorption coefficient. Therefore, the solution for the present case can be obtained with a Planck-mean-absorption-coefficient-based radiation property model.

Table 1: Boundary conditions for the plume simulation with thermal radiation
  Pressure (p):    inlet - totalPressure, P0 = p + 0.5 rho U^2 = 7.11 bar;  outlet - fixedValue, p = 1 atm;  wall - zeroGradient, ∇p = 0
  Velocity (U):    inlet - pressureInletOutletVelocity (inflow: U = (0,0,0), outflow: ∇U = 0);  outlet - inletOutlet (inflow: U = (0,0,0), outflow: ∇U = 0);  wall - noSlip, U = (0,0,0)
  Temperature (T): inlet - fixedValue, T = 2000 K;  outlet - zeroGradient, ∇T = 0;  wall - q_c + q_r = 0 [23]
  Species (x):     inlet - fixedValue, x = 1 for the pure H2O plume;  outlet - zeroGradient, ∇x = 0;  wall - zeroGradient, ∇x = 0

Thus, the RTE becomes

\frac{dI_p}{ds} = \kappa_p\,(I_b - I_p)    (20)

with the boundary condition

I_p = \epsilon_w I_b + \frac{1-\epsilon_w}{\pi}\int_{\hat{n}\cdot\hat{s}>0} I_p\,|\hat{n}\cdot\hat{s}|\, d\Omega \qquad (\hat{n}\cdot\hat{s} < 0)    (21)

The Planck mean absorption coefficients are calculated for the range of thermodynamic states of the gases at certain intervals, as mentioned in [18], and stored in the form of a lookup table. Furthermore, interpolation techniques are employed to calculate absorption coefficients that are not available in the lookup table. The radiative heat transfer, the work done by viscous forces and the species transport models have been added to the existing OpenFOAM application "sonicFoam", and the new application is named "radSonicFOAM". The algorithm of the new application is described below; it has been extensively verified and validated, as explained in the subsequent section, and has finally been used for estimating the thermal load on the nozzle base plate.
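A minimal sketch of the lookup-and-interpolate step and the optically thin source term described above is given below, assuming a one-dimensional table in temperature only; the paper's table additionally spans pressure and mole fraction (with multidimensional linear interpolation), and the numbers used here are placeholders rather than the HITEMP-based values.

#include <cmath>
#include <cstdio>
#include <vector>

// Piecewise-linear interpolation of kappa_p(T) from a small table.
double interpolate(const std::vector<double>& Tgrid,
                   const std::vector<double>& kGrid, double T) {
    if (T <= Tgrid.front()) return kGrid.front();
    if (T >= Tgrid.back())  return kGrid.back();
    std::size_t i = 1;
    while (Tgrid[i] < T) ++i;                       // bracketing interval
    double w = (T - Tgrid[i - 1]) / (Tgrid[i] - Tgrid[i - 1]);
    return (1.0 - w) * kGrid[i - 1] + w * kGrid[i];
}

int main() {
    // Placeholder table (illustrative values only, not the HITEMP-based data).
    std::vector<double> Tgrid = { 500.0, 1000.0, 1500.0, 2000.0 };
    std::vector<double> kGrid = {  10.0,    3.0,    1.5,    0.9 };   // [1/m]
    const double sigma = 5.670374419e-8;

    double T = 1750.0;                               // local gas temperature [K]
    double kappaP = interpolate(Tgrid, kGrid, T);
    double divQr  = 4.0 * kappaP * sigma * std::pow(T, 4.0); // optically thin emission
    std::printf("kappa_p = %.2f 1/m, div(q_r) = %.3e W/m^3\n", kappaP, divQr);
    return 0;
}

In the solver, the resulting divergence of the radiative heat flux enters the energy equation as the radiation sink term of the algorithm described in the next subsection.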
2.2. Numerical Procedure and solution algorithm for solving plume flow with radiation

The above mass, momentum, species, energy and radiative transfer equations are discretized using the finite volume method [24]. A second-order upwind scheme is used for the face-value interpolation, and the final set of algebraic equations is solved iteratively with the SIMPLE algorithm until the residuals for mass, momentum, species, energy and radiation drop below the 10^{-5} level. The solution algorithm is as follows:

1. Initialize the pressure, velocity, species and temperature fields.
2. Solve the mass, momentum, species transport and energy equations without radiation until convergence.
3. Using the converged field, initialize the intensity field.
4. Calculate the Planck mean absorption coefficient from the converged temperature, pressure and species mole-fraction fields using the Planck-mean lookup table, and solve the RTE until convergence.
5. Compute the divergence of the radiative heat flux.
6. Update the temperature field with the radiation sink term.
7. Repeat steps 2 to 6 until all fields reach steady state.

Furthermore, the flow diagram of the above algorithm is shown in Fig. 2.
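The outer iteration of steps 1-7 can be summarised in code as below. Everything here is an illustrative stand-in (plain structs and stub routines, a made-up kappa_p fit and a crude temperature relaxation), meant only to show the ordering of the flow solve, the table lookup, the source-term evaluation and the energy update; it is not the radSonicFOAM implementation.

#include <cmath>
#include <cstdio>

// Illustrative stand-ins for the field data and solver steps; these are not
// the OpenFOAM/radSonicFOAM classes or functions.
struct Fields {
    double p = 7.11e5;    // representative pressure [Pa]
    double T = 2000.0;    // representative temperature [K]
    double divQr = 0.0;   // divergence of radiative heat flux [W/m^3]
};

void solveFlowWithoutRadiation(Fields&) { /* step 2: mass, momentum, species, energy */ }

double planckMeanFromTable(double T, double /*p*/, double /*x*/) {
    // Placeholder fit standing in for the HITEMP-based lookup table (step 4).
    return 0.9 * std::exp(-(T - 300.0) / 1500.0);
}

double radiativeSourceTerm(double kappaP, double T) {
    const double sigma = 5.670374419e-8;
    return 4.0 * kappaP * sigma * std::pow(T, 4.0);   // step 5, optically thin
}

int main() {
    Fields f;                                          // step 1: initialize fields
    solveFlowWithoutRadiation(f);                      // step 2: converge without radiation
    for (int outer = 0; outer < 100; ++outer) {        // step 7: repeat to steady state
        double kappaP = planckMeanFromTable(f.T, f.p, 1.0);   // steps 3-4
        f.divQr = radiativeSourceTerm(kappaP, f.T);    // step 5
        f.T -= 1.0e-6 * f.divQr;                       // step 6: crude sink update
        solveFlowWithoutRadiation(f);                  // back to step 2
    }
    std::printf("T = %.1f K, div(q_r) = %.3e W/m^3\n", f.T, f.divQr);
    return 0;
}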
3. Verification and validation studies

The above mathematical modelling and solution algorithm are verified in three steps: (i) the calculated radiative properties are verified; (ii) the compressible flow solution is verified against published results; and (iii) the radiative heat flux on the base plate is verified using an assumed plume shape, in the sections below.

3.1. Verification of Planck mean absorption coefficient of pure H2O and CO2

The Planck mean absorption coefficients obtained for H2O and CO2 at various temperatures from HITEMP-2010, using an in-house C++ code [25, 26, 27], match the results of Chu et al. [28] with good agreement, as shown in Figure 3. The Planck mean absorption coefficient of H2O decreases exponentially with increasing temperature, whereas for CO2 it first increases up to a temperature of 750 K and then decreases up to 2000 K. The Planck mean absorption coefficient of H2O is higher than that of CO2 at lower temperatures, while the opposite holds at higher temperatures. This difference decreases with the increasing temperatures of the compressible flow.

3.2. Validation of compressible flow field

Darwish et al. [1] designed a convergent-divergent (C-D) nozzle using one-dimensional isentropic flow relations for perfectly expanded conditions for air. The designed C-D nozzle has an exit diameter of
2.5 mm and a throat diameter of 1.98 mm; thus the area ratio is A_r = 1.5942. The schematic diagram of the C-D nozzle with the buffer section, into which the flow emanates, is shown in Fig. 1. They simulated the flow for this nozzle, together with the buffer zone, using OpenFOAM on an axisymmetric geometry, and further performed experiments to visualize the flow using the shadowgraph technique. In the present study, we use the same nozzle to validate the pressure-based compressible flow application "sonicFoam". Air is allowed to expand from 7.1 atm pressure and 288 K into a quiescent medium at 1 atm. The boundary conditions used for this case are the same as given in Table 1, except that the temperature at the inlet is 288 K and the walls use a zeroGradient (∇T = 0) boundary condition for temperature.

[Figure 2: Flow chart for the solution of high-temperature, high-pressure plume flow with radiation.]

[Figure 3: Variation of the Planck mean absorption coefficient of pure H2O and CO2 with temperature at 1 bar pressure.]

The flow is simulated on an axisymmetric geometry,
created as a wedge of angle θ = 2.5°, one cell thick in the θ direction. The mesh contains 38,400 cells, and the distance of the first cell centre from the wall is maintained at y+ ≈ 30. The standard k-ε model is used to model turbulence. The pressure-implicit with splitting of operators (PISO) algorithm is used to solve the governing flow and energy equations. The thermophysical and transport properties of air are taken as constant: c_p = 1005 J/(kg K), γ = 1.4, μ = 1.789 × 10^{-5} Pa s and Pr = 0.7. The time step used for the present simulation is 10^{-8} s, and the simulation is run for 7 ms. The pressure and Mach number variations along the nozzle centerline, together with the results reported by Darwish et al. [1], are plotted in Figures 4 and 5, respectively. The present results are in good agreement with the literature results. There are no shocks or sudden discontinuities inside the nozzle, as the flow is perfectly expanded within it. Since the nozzle is designed with 1D isentropic calculations while the present simulations are performed for a 2D axisymmetric case, there is some deviation from 1D isentropic flow. Thus small expansion and compression waves form, creating the small diamond pattern that can be seen in the profiles of pressure and Mach number along the axis of the geometry.
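As a quick consistency check on this validation setup (a side calculation, not part of the solver), the one-dimensional isentropic relations can be evaluated for the quoted pressure ratio of 7.1 and γ = 1.4: the perfectly expanded exit Mach number comes out near 1.9 and the area ratio close to the quoted A_r = 1.5942, consistent with the Mach levels seen along the axis.

#include <cmath>
#include <cstdio>

int main() {
    const double gamma = 1.4;
    const double P0_over_Pe = 7.1;   // stagnation-to-exit pressure ratio from the paper

    // Exit Mach number for perfect expansion:
    // P0/P = (1 + (gamma-1)/2 * M^2)^(gamma/(gamma-1))
    double Me = std::sqrt(2.0 / (gamma - 1.0) *
                          (std::pow(P0_over_Pe, (gamma - 1.0) / gamma) - 1.0));

    // Isentropic area-Mach relation for A/A*.
    double term = (2.0 / (gamma + 1.0)) * (1.0 + 0.5 * (gamma - 1.0) * Me * Me);
    double Ar = std::pow(term, (gamma + 1.0) / (2.0 * (gamma - 1.0))) / Me;

    // Geometric area ratio from the quoted exit and throat diameters.
    double ArGeom = std::pow(2.5 / 1.98, 2.0);

    std::printf("Me = %.2f, A/A* (isentropic) = %.3f, A/A* (geometry) = %.4f\n",
                Me, Ar, ArGeom);
    return 0;
}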
[Figure 4: Variation of pressure along the axis of the geometry.]

[Figure 5: Variation of Mach number along the axis of the geometry.]

3.3. Verification of rocket base plate heating with an assumed plume shape

The axisymmetric approximation for the RTE has been tested on the rocket base-plate heating problem with a fixed plume shape. The plume is assumed to be conical, with a half-cone angle of 15° and a non-dimensional length Z/R = 50, as shown in Figure 6. The temperature of the plume, T_p, is uniform. The environment is assumed to be cold and non-participating, i.e., κ = 0, while the absorption coefficient of the plume is κ = 0.5 m^{-1}. Figure 7 shows the radiative heat flux at the base plate from the exhaust plume obtained by both axisymmetric and three-dimensional calculations. The result obtained from the 3D simulation is in good agreement with the results published by Baek and Kim [6], whereas the axisymmetric solution of the radiative transfer equations is very far from the published result. This would require a reformulation of the axisymmetric approximation of radiative heat transfer in OpenFOAM. Therefore, a three-dimensional geometry has been used for the further simulations, as shown in Figure
8a.

4. Results and discussion

The heating of the rocket base plate by thermal radiation from different plumes, composed of pure H2O, pure CO2 and a 50%-50% mixture of H2O and CO2, is studied numerically with OpenFOAM, an open-source CFD package. The present simulations are carried out on a full 3D geometry with the pressure-based compressible flow application sonicRadFoam. It has additional features over the existing sonicFoam, namely the work done by viscous forces in the energy equation, the species transport equation, and emission/absorption due to gaseous radiation. The Planck mean radiation heat transfer model, with a multidimensional linear interpolation technique for the properties, is also incorporated to perform the radiation heat transfer calculations, owing to the validity of the optically thin approximation.

[Figure 6: Geometry of the conical plume.]

[Figure 7: Variation of non-dimensional radiative heat flux at the base plate from the assumed plume shape, computed with axisymmetric and 3D RTE solutions.]

[Figure 8: (a) Three-dimensional geometry and mesh for the simulation of plumes with radiation; (b) cross-sectional view of the three-dimensional geometry.]
The results for the thermal load on the rocket base plate from exhaust plumes of three different compositions, i.e., a pure H2O plume, a pure CO2 plume and a 50%-50% H2O/CO2 mixture plume, are presented in the subsequent sections.

4.1. Pure H2O plume

A pure H2O plume is formed by the combustion of pure H2 with the liquid oxidizer LOX. The resulting product contains H2O at mole fraction x = 1, which emanates from the nozzle in the form of the plume. Initially the medium is filled with N2, and the H2O expands from 7.11 bar and 2000 K into a quiescent medium
at 1 atm and 288 K. The pressure remains constant in the convergent part of the nozzle, but it decreases suddenly at the throat and in the divergent part of the nozzle, as shown in Figure 10a. The exit pressure of the nozzle for the H2O plume is slightly higher than the pressure of the quiescent medium, i.e., 1.4 bar, which means that the flow is underexpanded [29]. Due to this underexpansion, a series of strong expansion and compression waves (oblique shocks) evolves from the lip of the nozzle as the pressure tries to adjust itself to the medium pressure. The shock that evolves from the lip of the nozzle has the shape of a barrel, so it can be called a "barrel shock", and a Mach disc appears after the shock, formed by singular reflection. The pressure variation in the divergent part of the nozzle enables the temperature reduction shown in Figure 10b. A similar effect of the pressure variation in the plume is seen in the temperature variation as well. Thus, the temperature variation in the divergent part of the nozzle and in the plume enables the heat transfer mechanism. However, no heat transfer occurs in the convergent part of the nozzle, because the temperature there is uniform. Physical quantities such as pressure, temperature and velocity (or Mach number) vary rapidly across the shock.
The shock pattern takes the form of a diamond, also known as a diamond flow structure. The pressure varies between 1.4 bar and 0.58 bar across the shock, as in Figure 10a. Similarly, the temperature also varies sharply, i.e., by up to 300 K in the region from 23 mm to 25 mm, as can be seen from the temperature profile along the axis in Fig. 10b. The temperature first decreases due to the expansion of the gases and then increases due to the compression wave, and this pattern continues until the pressure comes into equilibrium with the buffer-zone pressure. After 40 mm the flow stabilizes, as the pressure of the fluid at that point becomes the same as the medium pressure. The trend is opposite for the Mach number: as the gas expands, the flow velocity increases, and the maximum Mach number achieved in this case is 2.25. The contour of the Mach number and its profile along the centerline are shown in Figs. 9c and 10c, respectively. In the near-field region of the plume, after the inviscid core region, a mixing layer forms where viscosity effects are felt and the primary species (H2O) starts mixing with the atmospheric species (N2), forming a shear layer. The region just outside the nozzle where the species start mixing is called the entrainment region of the plume.
Moving downstream in the flow direction, the mixing layer widens, H2O being the lighter molecule (molecular weight 18), as in Fig. 9d. In the far-field region, i.e., the region after the shock, the species mix completely up to the centerline, as can be seen in the H2O and N2 profiles along the centerline. Fig. 10d shows the profiles of H2O and N2 along the axis, and the contours of H2O and N2 are shown in Figs. 9d and 9e, respectively. The pressure, temperature and H2O concentration fields constitute the thermodynamic state of the H2O vapour, and the Planck mean absorption coefficient of H2O has been accessed through the lookup tables; its contour is shown in Figure 11a. It has a very high value in the convergent portion of the nozzle, due to the very high pressure, decreases as the pressure decreases in the divergent section of the nozzle, and is further reduced in the plume. The absorption coefficient is zero where only N2 gas is present. Since the plume is of very small thickness, reabsorption does not occur and the major emission comes from the core of the plume; emission and absorption nearly balance in the shear layer, as the divergence of the radiative heat flux is almost zero both there and in the regions of zero absorption coefficient, as shown in Figure 11b. Note that the divergence of the radiative flux ranges from negative to positive values: a positive divergence corresponds to a radiative sink term, while a negative value corresponds to a radiative source term. Thus, radiation is heating the gas inside the divergent part of the nozzle while it is cooling the plume.
Further, the energy is transferred by the radiative mode of heat transfer to other regions without any change. The high-temperature plume, after emanating from the nozzle, diffuses and develops a very high flux and temperature in a very narrow region of the base plate around the lip of the nozzle. Barring this region, the base plate receives the radiative energy emanating from the shear layer of the plume. The radiative heat flux on the base plate is shown in Fig. 12a, barring some region close to the lip of the nozzle. The maximum value of the radiative heat flux is 1300 W/m2, and it decreases along the radial direction as the view factor of the plume decreases. Similarly, the temperature developed due to this radiative flux is shown in Fig. 12b. The maximum temperature the base plate attains due to the radiative energy is 308 K, and it decreases along the radius in a manner similar to the radiative flux.

4.2. Pure CO2 plume

Although the generation of a pure CO2 plume is not very realistic, the simulation has been performed for a pure CO2 plume for theoretical understanding. The simulations for pure CO2 are performed by supplying pure CO2 (x = 1) at the inlet of the nozzle, with the remaining conditions kept the same as for the H2O plume. This is also a case of underexpansion, so the pressure at the lip of the nozzle varies from 1.4 bar to 0.5 bar across the shocks.
There is a formation of a Mach disc at the end of the first shock. The contour of pressure and the distribution of pressure along the centerline are shown in Figs. 13a and 14a, respectively. The temperature drop across the shock in the CO2 plume is smaller than for H2O; however, there is a larger drop in temperature towards the end of the plume. The temperature contour is shown in Fig. 13b. The variation of temperature across the first shock is not as drastic as for the H2O plume; also, this plume cools faster than the H2O plume, i.e., the minimum temperature of this plume is 1200 K at its end, while it is 1350 K for the H2O plume. The Mach number

[Figure 9: Contours of (a) pressure, (b) temperature, (c) Mach number, (d) H2O and (e) N2 for the pure H2O plume.]
[Figure 10: Profiles of (a) pressure, (b) temperature, (c) Mach number and (d) species along the centerline for the pure H2O plume.]
[Figure 11: Contours of (a) absorption coefficient and (b) divergence of radiative heat flux for the pure H2O plume.]

[Figure 12: Profiles of (a) radiative heat flux and (b) temperature along the radius of the base plate for the pure H2O plume.]
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50e+6 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50e+6 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50e+6 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='76e+061400 1200 1000 q,(W/m") 800 600 400 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 Radiusofbaseplate(mm)310 309 308 307 Temperature (K) 306 305 304 303 302 301 300 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 Radiusof baseplate (mm)(a) (b) (c) (d) (e) Figure 13: Contours of (a) Pressure (b) Temperature (c) Mach number (d) CO2 (e) N2 for pure CO2 plume 22 p(N/m2) 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='11e+05 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50e+5 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+5 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50e+5 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+5 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50e+5 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+5 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50e+5 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+5 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50e+5 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50e+5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+5 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='84e+04T (K) 2000 1900 1800 1700 1600 1500 1400 1300 1200 1100 1000 900 800 700 600 500 400 292Ma 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='22 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='80 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='60 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='40 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='20 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='80 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='60 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='40 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00CO2 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='90 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='80 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='70 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='60 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='40 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='30 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00N2 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='90 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='80 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='70 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='60 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='40 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='30 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00(a) (b) (c) (d) Figure 14: Profile of (a) Pressure (b) Temperature (c) Mach number (d) Species for pure CO2 plume 23 8 6 5 Pressure (bar) 4 3 2 0 5 10 15 20 25 30 35 40 45 50 Nozzlecentralaxis(mm)2100 2000 1900 1800 1700 Temperature( 1600 1500 1400 1300 1200 1100 0 5 10 15 20 25 30 35 40 45 50 Nozzlecentralaxis(mm)2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='5 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='25 2 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='75 Machnumber 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='25 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='75 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='25 0 0 5 10 15 20 25 30 35 40 45 50 Nozzle central axis (mm)0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='9 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='7 Species distribution 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='6 CO2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='5 N2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='1 10 15 25 30 35 40 45 50 Nozzlecentralaxis(mm)(a) (b) Figure 15: Contours of (a) Absorption coefficient (b) Divergence of radiative heat flux for pure CO2 plume (a) (b) Figure 16: Profile of (a) Radiative heat flux (b) Temperature along the radius of base plate for pure CO2 plume 24 k(m-1) 30 28 26 24 22 20 18 16 14 12 10 8 6 4 2 0(gw/m)b- A 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='79e+07 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='60e+7 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='40e+7 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='20e+7 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+7 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6dE4T4oBgHgl3EQfBwu-/content/2301.04855v1.pdf'} +page_content='00e+6 8.' 
Figure 17: Profile of (a) Radiative heat flux (b) Temperature on base plate along the radius of base plate for 50-50% mixture of CO2 − H2O plume
contour and its distribution along the centerline are shown in Fig. 13c and 14c, respectively. The diffusion of CO2 in N2 is weaker than that of H2O because of the higher molecular weight of CO2 (44) compared to H2O (18), as shown in Fig. 14d. The contours of CO2 and N2 mole fraction are shown in Fig. 13d and 13e, respectively. The absorption coefficient distribution obtained with the Planck mean absorption coefficient for the CO2 plume is shown in Fig. 15a. Its value is almost zero everywhere except in the core of the plume and in the shear layer. As the absorption coefficient of CO2 in the shear layer is higher than for the H2O plume, the radiative heat flux on the rocket base plate is also higher, i.e., around 4000 W/m2, as shown in Fig. 16a. The corresponding temperature distribution on the base plate is shown in Fig. 16b, having a maximum value of 323 K, barring the diffusion region.
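The quantity plotted in Figs. 11b and 15b, the divergence of the radiative heat flux, takes for a gray, non-scattering gas described by a Planck mean absorption coefficient the standard form div(q_r) = kappa_P * (4*sigma*T^4 - G), where G is the incident radiation obtained from the angular integration of the RTE solution (here fvDOM). The short Python sketch below only evaluates this textbook expression; the function name and the numbers are illustrative assumptions and are not taken from the simulations reported here.

```python
import numpy as np

SIGMA = 5.670374419e-8  # Stefan-Boltzmann constant in W m^-2 K^-4

def div_q_rad(kappa_planck, T, G):
    """Divergence of the radiative heat flux for a gray, non-scattering gas:
    div(q_r) = kappa_P * (4*sigma*T^4 - G), with G the incident radiation."""
    return kappa_planck * (4.0 * SIGMA * np.asarray(T) ** 4 - np.asarray(G))

# Purely illustrative numbers for a single hot plume cell (not values from this work)
kappa = 5.0                   # Planck mean absorption coefficient in 1/m
T = 2000.0                    # local gas temperature in K
G = 4.0 * SIGMA * 300.0**4    # incident radiation corresponding to ~300 K surroundings
print(f"div(q_r) = {div_q_rad(kappa, T, G):.2e} W/m^3")
```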
4.3. Mixture plume (50 % H2O and 50 % CO2)
The combustion of hydrocarbon fuel with a liquid oxidizer (LOX) gives a 50-50% mixture of CO2 and H2O. Thus, for the present problem, we supply a 50-50% mixture of CO2 and H2O at the inlet of the nozzle, and the other conditions are kept the same as in the previous cases for the simulation of this plume. This is also a case of an underexpanded plume. The temperature variation along the centerline at the end of the buffer section is roughly the average of the pure CO2 and H2O plumes. The radiative transfer calculations are performed to determine the heat flux on the base plate from the CO2 − H2O plume. The maximum radiative heat flux on the base plate is 2300 W/m2 (Fig. 17a), and it decays with the radius of the base plate. The corresponding profile of the temperature on the base plate is shown in Fig. 17b. It is noted that the flux and temperature profiles for the CO2 and mixture plumes decay exponentially with radius, while they are almost linear for H2O. This is owing to the fact that the high diffusion of H2O causes more spreading of H2O, and the emission from this spread H2O has a high view factor, which is not the case for the CO2 and mixture plumes.
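Because the Planck mean is a linear, Planck-function-weighted average of the spectral absorption coefficient, the coefficient of a gas mixture is commonly built by adding the species contributions evaluated at their partial pressures and the local temperature. The sketch below only illustrates this additive combination for a CO2-H2O mixture; the function name and the coefficient values are placeholders, not data used in this work.

```python
def planck_mean_mixture(p_co2, p_h2o, kappa_p_co2, kappa_p_h2o):
    """Planck mean absorption coefficient of a CO2-H2O mixture in 1/m, assuming
    additive pressure-based species coefficients kappa_p_i in 1/(m*atm)
    evaluated at the local gas temperature."""
    return p_co2 * kappa_p_co2 + p_h2o * kappa_p_h2o

# Illustrative call for an equimolar mixture at 1 atm total pressure;
# the coefficient values below are placeholders, not tabulated data.
kappa_mix = planck_mean_mixture(p_co2=0.5, p_h2o=0.5,
                                kappa_p_co2=20.0, kappa_p_h2o=5.0)
print(f"Planck mean absorption coefficient of the mixture: {kappa_mix:.1f} 1/m")
```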
5. Conclusions
The thermal load calculation on the base plate of the nozzle from the exhaust plume is performed in OpenFOAM. The ability of the pressure-based compressible flow application "sonicFoam" to capture the flow fields for air expanding in a convergent-divergent nozzle is tested first. The stagnation pressure and temperature at the inlet of the nozzle are 7.11 bar and 288 K, due to which the flow expands and reaches Mach 2.1 at the exit of the nozzle. The resulting pressure and Mach number variation along the centerline match well with standard published results. The same nozzle is then used with an elevated stagnation temperature of 2000 K and the same inlet pressure to estimate the heat load on the base plate for three different plumes, namely a pure H2O plume, a pure CO2 plume and a mixture plume. The "sonicFoam" application is modified by incorporating the work done by viscous forces and the species transport equation, and is coupled with the RTE solver fvDOM along with the Planck mean absorption emission model; the resulting solver is named "radSonicFOAM". All three plumes exit the nozzle under underexpanded flow conditions, where the exit pressure is higher than the back pressure. The expansion waves start from the lip of the nozzle, due to which the temperature decreases as the flow exits the nozzle and the Mach number increases to a maximum value of 2.25. The maximum heat load due to thermal radiation on the base plate in the present study comes from the pure CO2 plume, i.e., 4000 W/m2, due to the high value of its absorption coefficient, barring the diffusion zone.
This flux heats up the base plate, and its temperature rises up to 323 K. It is followed by the mixture plume, which receives a maximum radiative heat flux of 2300 W/m2 with a corresponding temperature rise to 312 K. For the pure H2O plume the heat flux is lowest, i.e., 1300 W/m2, with a temperature rise to 308 K. The variation of the flux is different for the different plumes, and this is mostly due to the difference in the absorption coefficients of the gases. Further, their molecular weights also differ, due to which the flow fields of the gases differ, as do the shapes of the flux and temperature variations on the nozzle base plate. Due to the small length scale, the current case falls in the optically thin regime; thus, the Planck mean absorption model provides satisfactory results. However, the Planck mean absorption model may not be adequate for other cases with a large length scale. Therefore, full-spectrum radiative property models are needed, with properties for all thermodynamic states existing in the plume. Furthermore, solid fuels emanate particles which contribute most of the radiative thermal load on the nozzle base plate; therefore, the current radiation heat transfer feature needs to be further enhanced by including a scattering model.
diff --git a/8NE3T4oBgHgl3EQfqQrl/content/2301.04651v1.pdf b/8NE3T4oBgHgl3EQfqQrl/content/2301.04651v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..67ef4705d392b15ba078cc953df71896631d31ba --- /dev/null +++ b/8NE3T4oBgHgl3EQfqQrl/content/2301.04651v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c438c38c09e6aa2d536506b68de2d9667fc4441860dd5c2749ff6da36696437f +size 2147291 diff --git a/8NE3T4oBgHgl3EQfqQrl/vector_store/index.pkl b/8NE3T4oBgHgl3EQfqQrl/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..8d5e372e48a75b7a52b99a5ba7b87f7e6b4acb20 --- /dev/null +++ b/8NE3T4oBgHgl3EQfqQrl/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3455354d525259c187b2ec3c9943f1d153e9e46a75f2631cb45a37be12de69f3 +size 67589 diff --git a/99FLT4oBgHgl3EQfCS7y/content/2301.11975v1.pdf b/99FLT4oBgHgl3EQfCS7y/content/2301.11975v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e2ef08a4703efae054f5b66b3fb64fb4b05f156e --- /dev/null +++ b/99FLT4oBgHgl3EQfCS7y/content/2301.11975v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9279125764de599c9e7f33e8b33770f1d10c8f654992f12824fb23ec7f175795 +size 17283314 diff --git a/B9AyT4oBgHgl3EQf4PrK/content/2301.00784v1.pdf b/B9AyT4oBgHgl3EQf4PrK/content/2301.00784v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..898e4d4e6c2b121c95ff17b8f96ee39eb97233ed --- /dev/null +++ b/B9AyT4oBgHgl3EQf4PrK/content/2301.00784v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c4c713222d46f92fe07cbb0b506d4720c3f35782ded08635b2f52be9de8094d +size 327589 diff --git a/B9AyT4oBgHgl3EQf4PrK/vector_store/index.faiss b/B9AyT4oBgHgl3EQf4PrK/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..37452f3f06ff476d93b2aeab047164f4b4ac9cba --- /dev/null +++ b/B9AyT4oBgHgl3EQf4PrK/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:808ec59431c2f58932e0de7e4da80d9bf064a6cd9abc7803e77d9aa092cf67fc +size 4390957 diff --git a/B9AyT4oBgHgl3EQf4PrK/vector_store/index.pkl b/B9AyT4oBgHgl3EQf4PrK/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b6cfe153be03c7ef3c3ac33d23d940c49de97aa3 --- /dev/null +++ b/B9AyT4oBgHgl3EQf4PrK/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52debdaf3e0ee9e7a2482f7b58591736b59ba569a99ef4d1c14aa68cfe4afc15 +size 154784 diff --git a/BdE1T4oBgHgl3EQfVgTd/vector_store/index.faiss b/BdE1T4oBgHgl3EQfVgTd/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..f4c141a99793c0a9656200244e359ce91eda5dea --- /dev/null +++ b/BdE1T4oBgHgl3EQfVgTd/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eae4b3cfca7cea50b8c9c3a02faede020f5b1da89af4ead337c0fe541c31d573 +size 8192045 diff --git
a/BdFQT4oBgHgl3EQf9zeq/content/2301.13452v1.pdf b/BdFQT4oBgHgl3EQf9zeq/content/2301.13452v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..22c82c26cb02f9d4b474e8d9dadc91de497b7ab5 --- /dev/null +++ b/BdFQT4oBgHgl3EQf9zeq/content/2301.13452v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ac4b5ceee3ca791739b8a1c83749b8821de098f3e8925ca32b212d2f4820daf +size 2564875 diff --git a/BdFQT4oBgHgl3EQf9zeq/vector_store/index.faiss b/BdFQT4oBgHgl3EQf9zeq/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..9b0fe2f25f87a5ae87dafb3a6aeb7870bdbe2468 --- /dev/null +++ b/BdFQT4oBgHgl3EQf9zeq/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e191334d0c9b3239f6fee0eb2cccfaa9c0abf698228899e96fac67247efe61c +size 5505069 diff --git a/BdFQT4oBgHgl3EQf9zeq/vector_store/index.pkl b/BdFQT4oBgHgl3EQf9zeq/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..185a7d7ce2847e3acebc72e24f6b875ccd039e90 --- /dev/null +++ b/BdFQT4oBgHgl3EQf9zeq/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da77230c64ff1c41c028594d1d6a572ed81ea3b5f6bf718dc9a19758f6c8803d +size 218817 diff --git a/CdE4T4oBgHgl3EQfeQ2g/content/2301.05098v1.pdf b/CdE4T4oBgHgl3EQfeQ2g/content/2301.05098v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4e859a52272d6537592db187eb07cd38e407a858 --- /dev/null +++ b/CdE4T4oBgHgl3EQfeQ2g/content/2301.05098v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d66f38b7e61151b3b7cb7c47c3b814d0693494aec42e55334fc3c97144794147 +size 415674 diff --git a/CdE4T4oBgHgl3EQfeQ2g/vector_store/index.pkl b/CdE4T4oBgHgl3EQfeQ2g/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..46f524f6f5631ed8c28030fa488c32b22ffdf47b --- /dev/null +++ b/CdE4T4oBgHgl3EQfeQ2g/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aae97493fe8b0e1d354e04d71cd1f97905d1a98bf027f471340c7ec0a894b1a5 +size 223345 diff --git a/CtFRT4oBgHgl3EQfwTji/content/tmp_files/2301.13638v1.pdf.txt b/CtFRT4oBgHgl3EQfwTji/content/tmp_files/2301.13638v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..e0310df5343ec540bee74059de49133cf8ce47bf --- /dev/null +++ b/CtFRT4oBgHgl3EQfwTji/content/tmp_files/2301.13638v1.pdf.txt @@ -0,0 +1,595 @@
Charge collection and efficiency measurements of the TJ-Monopix2 DMAPS in 180 nm CMOS technology
Christian Bespin,𝑎,∗ Ivan Caicedo,𝑎 Jochen Dingfelder,𝑎 Tomasz Hemperek,𝑎,𝑒 Toko Hirono,𝑎,𝑏 Fabian Hügging,𝑎 Hans Krüger,𝑎 Konstantinos Moustakas,𝑎,𝑐 Heinz Pernegger,𝑑 Petra Riedler,𝑑 Lars Schall,𝑎 Walter Snoeys𝑑 and Norbert Wermes𝑎
𝑎Physikalisches Institut, Universität Bonn, Nußallee 12, Bonn, Germany
𝑏Deutsches Elektronen-Synchrotron (DESY), Notkestraße 85, Hamburg, Germany
𝑐Paul Scherrer Institut, Forschungsstrasse 111, Villigen, Switzerland
𝑑CERN, Espl. des Particules 1, Meyrin, Switzerland
𝑒DECTRIS AG, Täfernweg 1, Baden-Dättwil, Switzerland
E-mail: bespin@physik.uni-bonn.de
Monolithic CMOS pixel detectors have emerged as competitive contenders in the field of high-energy particle physics detectors. The use of commercial processes offers high-volume production of such detectors. A series of prototypes has been designed in a 180 nm Tower CMOS process with depletion of the sensor material and a column-drain readout architecture.
The latest iteration, TJ-Monopix2, features a large 2 × 2 cm² matrix consisting of 512 × 512 pixels with 33.04 µm pitch. A small collection electrode design aims at low power consumption and low noise, while the radiation tolerance for high-energy particle detector applications needs extra attention. With the goal of reaching a radiation tolerance to NIEL damage levels of 1 × 10¹⁵ 1 MeV neq/cm², a modification of the standard process has been implemented by adding a low-dose n-type silicon implant across the pixel in order to allow for homogeneous depletion of the sensor volume. Recent lab measurements and beam tests were conducted on unirradiated modules to study electrical characteristics and hit detection efficiency.
10th International Workshop on Semiconductor Pixel Detectors for Particles and Imaging (Pixel2022), 12-16 December 2022, Santa Fe, New Mexico, USA
∗Speaker
© Copyright owned by the author(s) under the terms of the Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License (CC BY-NC-ND 4.0). https://pos.sissa.it/
arXiv:2301.13638v1 [physics.ins-det] 31 Jan 2023
1. Introduction
In recent years, advances in CMOS technologies have fueled the development of a new generation of monolithic active pixel sensors (MAPS) with fast readout and high radiation tolerance, achieved by depleting the charge-sensitive volume [1]. These depleted MAPS (DMAPS) devices are therefore an interesting candidate for high-energy particle physics experiments with high radiation environments and high particle rates. Depletion is achieved by using high-voltage add-ons in the CMOS technology and/or high-resistivity substrates. The increasing availability of these features in commercial CMOS processes could combine the advantages of the detector concept with possibly faster and cheaper production than for common hybrid pixel detectors. The idea behind, and measurement results from, one of multiple DMAPS prototypes, TJ-Monopix2 [2, 3], are presented in the following.
2. Design of TJ-Monopix2
TJ-Monopix2 is the latest DMAPS prototype of the TJ-Monopix development line, which is based on the ALPIDE pixel detector developed for the ALICE ITS upgrade [4]. It is fabricated in the same 180 nm commercial CMOS process provided by Tower Semiconductor¹. A modification of the process used for ALPIDE has been implemented to increase the radiation tolerance to levels ≥ 1 × 10¹⁵ neq cm⁻² by adding a low dose n-type implant for homogeneous growth of the depletion zone with applied bias voltage. In measurements on first prototypes with this modification, a drop in hit detection efficiency was observed after irradiation [5, 6]. This could be improved significantly by adding a gap in the n-type blanket or a deep p-type implant in the pixel corners to shape the electric field towards the collection electrode [7]. The cross-sections of these two sensor designs are shown in fig. 1.
[Figure 1(a): cross-section sketch with the labels P+ SUBSTRATE, P− EPITAXIAL LAYER, COLLECTION N-WELL, LOW DOSE N-TYPE IMPLANT, DEEP PWELL, PWELL, NWELL.] (a) Modification with gap in the low dose n-type implant below the pixel electronics.
[Figure 1(b): cross-section sketch with the same labels plus EXTRA DEEP PWELL.] (b) Modification with continuous n-type implant and deep p-type implant below the pixel electronics.
Figure 1: Cross-section variants of the modified sensor process for TJ-Monopix2.
Additionally, chips have been produced on Czochralski silicon to increase the available depletable volume compared to the thickness of the epitaxial layer (O(10 µm)). Measurements on Czochralski silicon chips in TJ-Monopix1 showed a further increase in hit detection efficiency after irradiation [8].
TJ-Monopix2 follows a small collection electrode approach with a pixel capacitance of about 3 fF. The pixels of size 33 × 33 µm² are read out using the established synchronous column-drain technique of the FE-I3 readout chip [9]. Further changes with respect to the predecessor TJ-Monopix1 include an improved front-end design, a new pixel masking scheme and a 3-bit DAC for local threshold tuning. With these changes the threshold is expected to be reduced by a factor of 3 while improving the threshold dispersion and noise behavior.
¹ https://towersemi.com
The digital periphery contains logic for register configuration, data handling and LVDS output drivers. Slow control is done via a command protocol and decoder taken from the RD53B readout chip [10]. Both pixel and register data are 8b10b encoded in a frame-based data stream, which allows operating the chip with four differential data lines.
3. Injection-based threshold and noise measurements
Initial tests have been performed in a laboratory setup to measure the threshold and noise performance of TJ-Monopix2. All of these values are extracted by injecting different amounts of charge into the pixel a given number of times and recording the number of registered hits $n_\mathrm{hits}$. The response function is a smeared step function of the form
$$ n_\mathrm{hits}(q) = \frac{1}{2}\, n_\mathrm{injections} \cdot \left[\operatorname{erf}\!\left(\frac{q - q_\mathrm{thr}}{\sigma\sqrt{2}}\right) + 1\right] \qquad (1) $$
with $q$ the injected charge, $n_\mathrm{injections}$ the number of consecutive injections of $q$, and $q_\mathrm{thr}$ the charge at the threshold. $\sigma$ denotes the Gaussian smearing of the step function, which is defined as the electronic noise of the pixel. The threshold is defined as the charge at which 50 % of the injected hits are recorded by the pixel. Histogramming the individual thresholds of all pixels leads to the distribution shown in fig. 2a. The distribution has a mean value of 164 e− and a width of 13 e−, which is defined as the threshold dispersion. By adjusting the threshold DAC in each pixel in order to even out the deviations from the target threshold, the threshold dispersion can be reduced significantly.
[Figure 2: threshold histograms (Threshold / e− vs. number of pixels, color-coded by TDAC setting 1-7). Fit results: (a) before tuning µ = 164.3 e−, σ = 13.0 e−; (b) after tuning µ = 163.2 e−, σ = 2.7 e−.] (a) Threshold distribution before in-pixel threshold tuning. (b) Threshold distribution after in-pixel threshold tuning. Figure 2: Threshold distribution of TJ-Monopix2 before and after adjusting the in-pixel threshold DAC to lower the dispersion. Color-coded is the value of the DAC setting for every pixel.
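Editor's note: as an illustration of how eq. (1) is used in practice, the following minimal Python sketch fits the smeared step function to a toy injection scan of a single pixel to extract the threshold q_thr and the electronic noise σ (ENC). It is not the analysis code used for the results reported here; the toy data, the number of injections and all function names are assumptions made for the example.

```python
import numpy as np
from scipy.special import erf
from scipy.optimize import curve_fit

def scurve(q, q_thr, sigma, n_injections=100):
    """Smeared step function of eq. (1): expected number of registered
    hits for an injected charge q (all charges in electrons)."""
    return 0.5 * n_injections * (erf((q - q_thr) / (sigma * np.sqrt(2))) + 1.0)

def fit_scurve(inj_charge, n_hits, n_injections=100):
    """Fit the 50 % point (threshold) and the Gaussian smearing (ENC) of one pixel."""
    p0 = [np.median(inj_charge), 10.0]  # starting values: threshold, noise
    popt, _ = curve_fit(
        lambda q, q_thr, sigma: scurve(q, q_thr, sigma, n_injections),
        inj_charge, n_hits, p0=p0)
    return popt  # (q_thr, sigma) in electrons

# Toy injection scan: 100 injections per charge step for one pixel.
charges = np.arange(50, 301, 10, dtype=float)
rng = np.random.default_rng(0)
hits = rng.binomial(100, np.clip(scurve(charges, 165.0, 13.0) / 100.0, 0.0, 1.0))

q_thr, enc = fit_scurve(charges, hits)
print(f"threshold = {q_thr:.1f} e-, ENC = {enc:.1f} e-")
```

Repeating such a fit for every pixel and histogramming the fitted q_thr values yields distributions like those shown in fig. 2.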
The resulting distribution after this so-called tuning process is shown in fig. 2b. While the mean threshold stays essentially the same, the dispersion could be reduced by a factor of almost 5. Both the mean threshold and the threshold dispersion are significantly lower than in TJ-Monopix1, where losses in hit detection efficiency were observed due to too large thresholds [8].
[Figure 3: ENC histograms (ENC / e− vs. number of pixels). Fit results: (a) TJ-Monopix1 µ = 11.8 e−, σ = 1.5 e−; (b) TJ-Monopix2 µ = 5.6 e−, σ = 0.6 e−.] (a) Noise distribution of TJ-Monopix1 with a noticeable tail towards larger values. (b) Noise distribution of TJ-Monopix2. There is no observable tail and the overall noise is lower. Figure 3: Noise distribution of TJ-Monopix1 (left) and TJ-Monopix2 (right) for comparison.
The corresponding histogram of the electronic noise is depicted in fig. 3. As a comparison, the distribution of the predecessor TJ-Monopix1 is included, where a large tail towards higher values was observed that led to a high operational threshold in order to limit the number of noisy pixels. It can be seen that this tail is largely removed with slight changes to the analog front-end, which in turn lowers the threshold for regular operation of the chip.
4. Hit detection efficiency measurements
Two different pixel variations were investigated regarding their hit detection efficiency and are presented in the following: a DC-coupled, more standard design, which makes up most of the matrix, and an AC-coupled investigative design realized in only a few columns of the matrix. While the former was measured in more detail, some first results for the latter are included as well.
4.1 Standard DC-coupled pixel flavor
First measurements to determine the hit detection efficiency have been performed in a 5 GeV electron beam at the DESY II testbeam facility at DESY, Hamburg [11]. Three unirradiated modules were tested with different sensor geometries: two chips with 30 µm thick epitaxial silicon and the two geometries depicted in fig. 1, as well as one chip built on 300 µm Czochralski silicon with a gap in the low dose n-type implant (see fig. 1a). It should be noted that the different substrate materials offer different sensor thicknesses and therefore different charge-sensitive volumes depending on the depletion; the measurements are not targeting a comparison between the different types of silicon.
Figures 4a and 4b show the recorded cluster charge for a chip with an epitaxial layer and for a chip with Czochralski substrate. The collected charge is about 25 % larger in the Cz sample, because there the depletion depth is limited only by the sensor thickness (300 µm): the sensor is far from fully depleted, but depleted deeper than the 30 µm thick epitaxial layer of the other chip. The average cluster size is significantly larger in the Cz sample as well, which results in a high spatial resolution due to charge-weighted clustering. The cluster size distributions for the same samples are depicted in figs. 4c and 4d.
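Editor's note: the charge-weighted clustering mentioned above can be illustrated with a short center-of-gravity calculation. The sketch below is a simplified example that assumes calibrated per-hit charges and uses the 33.04 µm pitch quoted for TJ-Monopix2; it is not taken from the actual beam-test reconstruction, and the function and variable names are placeholders.

```python
import numpy as np

PITCH_UM = 33.04  # TJ-Monopix2 pixel pitch in micrometers

def cluster_center_of_gravity(cols, rows, charges):
    """Charge-weighted centroid of one cluster.

    cols, rows : integer pixel indices of the hits in the cluster
    charges    : collected charge per hit (e.g. in electrons)
    returns    : (x, y) position in micrometers, weighted by the hit charges
    """
    cols = np.asarray(cols, dtype=float)
    rows = np.asarray(rows, dtype=float)
    w = np.asarray(charges, dtype=float)
    x = np.sum((cols + 0.5) * PITCH_UM * w) / np.sum(w)
    y = np.sum((rows + 0.5) * PITCH_UM * w) / np.sum(w)
    return x, y

# Example: a two-pixel cluster sharing its charge 60/40 between neighboring columns.
x, y = cluster_center_of_gravity(cols=[10, 11], rows=[20, 20], charges=[1800, 1200])
print(f"cluster position: x = {x:.2f} um, y = {y:.2f} um")
```

For size-2 clusters, which dominate in the Czochralski sample, this weighting is what allows an intrinsic resolution below the binary limit of 33.04 µm/√12 ≈ 9.5 µm, consistent with the 8.6 µm quoted below.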
While cluster size 1 is predominant in the epitaxial sample, the Cz sample has mainly clusters of size 2. The corresponding average cluster sizes are 1.55 and 1.95, respectively. Taking the pointing resolution of the beam telescope into account, an intrinsic spatial resolution of 8.6 µm could be achieved with a Czochralski silicon sample.
[Figure 4: cluster charge and cluster size histograms. (a) Cluster charge distribution for an epitaxial silicon chip, MPV charge 2579 e−. (b) Cluster charge distribution for a Czochralski silicon chip, MPV charge 3235 e−. (c) Cluster size distribution for an epitaxial silicon chip, mean cluster size 1.51. (d) Cluster size distribution for a Czochralski silicon chip, mean cluster size 1.95.] Figure 4: Cluster charge and size distributions for a chip with 30 µm epitaxial silicon (left) and 300 µm Czochralski silicon (right) at −6 V bias voltage. The latter can be depleted further than 30 µm, resulting in a larger cluster charge and size. Both chips were operated at a threshold of 200 e−.
The hit detection efficiency was measured with a beam telescope consisting of six Mimosa26 planes and an FE-I4 time reference plane, all connected to a trigger logic unit to synchronize the individual detector hits in time. The efficiency for all three modules is shown in fig. 5, where the result for every pixel was mapped onto a two-by-two pixel cell to increase the statistics and reveal possible effects or efficiency losses within a single pixel. All samples were operated at a threshold of about 200 e− and achieve a hit detection efficiency of around 99.80 %, with slight deviations within the error (estimated < 0.1 %). There are no losses observable in the pixel corners or between pixels.
[Figure 5: in-pixel efficiency maps (column [µm] vs. row [µm], efficiency color scale 90-100 %). (a) (99.80 ± 0.10) % efficiency for a chip built on epitaxial silicon with the gap-in-n-layer modification from fig. 1a. (b) (99.79 ± 0.10) % efficiency for a chip built on Cz silicon with the gap-in-n-layer modification from fig. 1a. (c) (99.85 ± 0.10) % efficiency for a chip built on epitaxial silicon with the additional p-well modification from fig. 1b.] Figure 5: Hit detection efficiencies for different substrate materials with different thicknesses and sensor geometries. Results were mapped onto a 2 x 2 pixel array for higher statistics and in-pixel resolution. The chips were operated with −6 V bias voltage and at a 200 e− threshold.
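Editor's note: in-pixel efficiency maps like those in fig. 5 and fig. 6 can be produced by folding telescope track impact points into a repeating 2 × 2 pixel cell and dividing matched by all tracks per bin. The sketch below shows one possible implementation with assumed inputs (track positions in µm and a boolean flag marking tracks with a matched DUT hit); it is an editor's illustration, not the testbeam analysis used by the authors.

```python
import numpy as np

PITCH_UM = 33.04        # pixel pitch
CELL_UM = 2 * PITCH_UM  # fold onto a 2 x 2 pixel cell as in fig. 5
BIN_UM = 2.0            # histogram bin size

def in_pixel_efficiency(track_x_um, track_y_um, matched):
    """Fold track impact points into a 2 x 2 pixel cell and compute the
    per-bin hit detection efficiency (matched tracks / all tracks)."""
    x = np.mod(track_x_um, CELL_UM)
    y = np.mod(track_y_um, CELL_UM)
    bins = np.arange(0.0, CELL_UM + BIN_UM, BIN_UM)
    total, _, _ = np.histogram2d(x, y, bins=[bins, bins])
    passed, _, _ = np.histogram2d(x[matched], y[matched], bins=[bins, bins])
    with np.errstate(invalid="ignore", divide="ignore"):
        eff = np.where(total > 0, passed / total, np.nan)
    return eff  # 2D array, one efficiency value per (column, row) bin

# Toy usage with uniformly distributed tracks and a flat 99.8 % efficiency.
rng = np.random.default_rng(1)
tx = rng.uniform(0, 100 * PITCH_UM, 200_000)
ty = rng.uniform(0, 100 * PITCH_UM, 200_000)
hit = rng.random(200_000) < 0.998
eff_map = in_pixel_efficiency(tx, ty, hit)
print(f"mean in-pixel efficiency: {np.nanmean(eff_map):.4f}")
```

Folding all pixels onto one cell is what provides the statistics needed to resolve sub-pixel structures such as losses in the pixel corners.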
4.2 AC-coupled pixel flavor
Another pixel variation with a different analog front-end was tested as well to determine its performance in a particle beam. In this design the (positive) bias voltage is applied via a diode on the top side of the chip and connected to the charge collection n-well. To avoid breakdown of the front-end electronics due to the high voltage (≤ 50 V) on that well, the input signal is AC coupled to the amplifier. This approach can potentially deplete the substrate further because of the higher voltage compared to what can be applied in the standard pixel design. The hit detection efficiency was measured for different bias voltages and is shown in fig. 6. At 5 V the efficiency is already above 99 %, and it reaches the same value as the DC-coupled pixel flavor, 99.85 %, at or before 25 V bias voltage. Taking the slightly higher threshold into account, this is in agreement with the expectation that there should be no noticeable difference in hit detection efficiency between the two pixel flavors before irradiation. The larger applicable bias voltage could prove superior after irradiation for achieving more depletion and therefore a higher charge signal.
[Figure 6: in-pixel efficiency maps of the AC-coupled pixel flavor ("HV CASC" region; column [µm] vs. row [µm], efficiency color scale 90-100 %). (a) (99.21 ± 0.10) % efficiency at 5 V bias voltage and 250 e− threshold. (b) (99.85 ± 0.10) % efficiency at 25 V bias voltage and 200 e− threshold.] Figure 6: Hit detection efficiency of an AC-coupled pixel flavor at (a) 5 V and (b) 25 V bias voltage.
5. Conclusion
In summary, the performance of TJ-Monopix2 shows a significant improvement in threshold value and dispersion compared to TJ-Monopix1, although the threshold is still higher than its design value (120 e−). With the measured amount of charge in the sensor, the threshold is small enough to detect the majority of hits, even from large clusters, before irradiation. The large signal combined with the small pixel size leads to a large cluster size and therefore a high spatial resolution, where chips on Czochralski substrate perform slightly better due to the larger sensor volume. For the tested sensor materials with different thicknesses and sensor geometries, the hit detection efficiency is around 99.8 % or better in all cases. The modified front-end version with the bias applied to the charge collection node achieves similar hit detection efficiencies while providing larger headroom in bias voltage to maintain efficient performance after radiation damage. The results for irradiated modules will be presented in a forthcoming publication.
Acknowledgments
This project has received funding from the Deutsche Forschungsgemeinschaft DFG (grant WE 976/4-1), the German Federal Ministry of Education and Research BMBF (grant 05H15PDCA9), and the European Union's Horizon 2020 research and innovation program under grant agreements no. 675587 (Marie Skłodowska-Curie ITN STREAM), 654168 (AIDA-2020), and 101004761 (AIDAinnova). The measurements leading to these results have been performed at the Test Beam Facility at DESY Hamburg (Germany), a member of the Helmholtz Association (HGF).
References
[1] I. Perić, A novel monolithic pixelated particle detector implemented in high-voltage CMOS technology, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 582 (2007) 876.
[2] K. Moustakas, M. Barbero, I. Berdalovic, C. Bespin, P. Breugnon, I. Caicedo et al., CMOS monolithic pixel sensors based on the column-drain architecture for the HL-LHC upgrade, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 936 (2019) 604.
[3] K. Moustakas, Design and Development of Depleted Monolithic Active Pixel Sensors with Small Collection Electrode for High-Radiation Applications, Ph.D. thesis, Rheinische Friedrich-Wilhelms-Universität Bonn, Sept. 2021.
[4] M. Mager, ALPIDE, the Monolithic Active Pixel Sensor for the ALICE ITS upgrade, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 824 (2016) 434.
[5] I. Caicedo, M. Barbero, P. Barrillon, I. Berdalovic, S. Bhat, C. Bespin et al., The Monopix chips: depleted monolithic active pixel sensors with a column-drain read-out architecture for the ATLAS Inner Tracker upgrade, Journal of Instrumentation 14 (2019) C06006.
[6] C. Bespin, M. Barbero, P. Barrillon, I. Berdalovic, S. Bhat, P. Breugnon et al., DMAPS Monopix developments in large and small electrode designs, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 978 (2020) 164460.
[7] M. Dyndal, V. Dao, P. Allport, I.A. Tortajada, M. Barbero, S. Bhat et al., Mini-MALTA: radiation hard pixel designs for small-electrode monolithic CMOS sensors for the High Luminosity LHC, Journal of Instrumentation 15 (2020) P02005.
[8] C. Bespin, I. Berdalovic, I. Caicedo, R. Cardella, J. Dingfelder, L. Flores et al., Development and characterization of a DMAPS chip in TowerJazz 180 nm technology for high radiation environments, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 1040 (2022) 167189.
[9] I. Perić, L. Blanquart, G. Comes, P. Denes, K. Einsweiler, P. Fischer et al., The FEI3 readout chip for the ATLAS pixel detector, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 565 (2006) 178.
[10] RD53 collaboration, RD53B Manual, Tech. Rep. CERN-RD53-PUB-19-002, CERN, Geneva (Mar. 2019).
[11] R. Diener, J. Dreyling-Eschweiler, H. Ehrlichmann, I. Gregor, U. Kötz, U. Krämer et al., The DESY II test beam facility, Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 922 (2019) 265.
+8 + diff --git a/CtFRT4oBgHgl3EQfwTji/content/tmp_files/load_file.txt b/CtFRT4oBgHgl3EQfwTji/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..9cbb73a10394ac853569b9bc0beac8fcd4af2000 --- /dev/null +++ b/CtFRT4oBgHgl3EQfwTji/content/tmp_files/load_file.txt @@ -0,0 +1,276 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf,len=275 +page_content='Charge collection and efficiency measurements of the TJ-Monopix2 DMAPS in 180 nm CMOS technology Christian Bespin,𝑎,∗ Ivan Caicedo,𝑎 Jochen Dingfelder,𝑎 Tomasz Hemperek,𝑎,𝑒 Toko Hirono,𝑎,𝑏 Fabian Hügging,𝑎 Hans Krüger,𝑎 Konstantinos Moustakas,𝑎,𝑐 Heinz Pernegger,𝑑 Petra Riedler,𝑑 Lars Schall,𝑎 Walter Snoeys𝑑 and Norbert Wermes𝑎 𝑎Physikalisches Institut, Universität Bonn, Nußallee 12, Bonn, Germany 𝑏Deutsches Elektronen-Synchrotron (DESY) Notkestaße.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 85, Hamburg, Germany 𝑐Paul Scherrer Institut, Forschungsstrasse 111, Villingen, Switzerland 𝑑CERN Espl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' des Particules 1, Meyrin, Switzerland 𝑒DECTRIS AG Täfernweg 1, Baden-Dättwil, Switzerland E-mail: bespin@physik.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='uni-bonn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='de Monolithic CMOS pixel detectors have emerged as competitive contenders in the field of high- energy particle physics detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The use of commercial processes offers high-volume production of such detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' A series of prototypes has been designed in a 180 nm Tower CMOS process with depletion of the sensor material and a column-drain readout architecture.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The latest iteration, TJ- Monopix2, features a large 2 × 2 cm2 matrix consisting of 512 × 512 pixels with 33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='04 µm pitch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' A small collection electrode design aims at low power consumption and low noise while the radiation tolerance for high-energy particle detector applications needs extra attention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' With a goal to reach radiation tolerance to levels of NIEL damage of 1 × 1015 1 MeV neq/cm2, a modification of the standard process has been implemented by adding a low-dosed n-type silicon implant across the pixel in order to allow for homogeneous depletion of the sensor volume.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Recent lab measurements and beam tests were conducted for unirradiated modules to study electrical characteristics and hit detection efficiency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 10th International Workshop on Semiconductor Pixel Detectors for Particles and Imaging (Pixel2022) 12-16 December 2022 Santa Fe, New Mexico, USA ∗Speaker © Copyright owned by the author(s) under the terms of the Creative Commons Attribution-NonCommercial-NoDerivatives 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='0 International License (CC BY-NC-ND 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' https://pos.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='sissa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='it/ arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='13638v1 [physics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='ins-det] 31 Jan 2023 TJ-Monopix2: DMAPS in 180 nm CMOS technology Christian Bespin 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Introduction In recent years, advances in CMOS technologies have fueled the development of a new gener- ation of monolithic active pixel sensors (MAPS) with fast readout and high radiation tolerance by depleting the charge sensitive volume [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' These depleted MAPS (DMAPS) devices are therefore an interesting candidate for high-energy particle physics experiments with high radiation environments and high particle rate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Depletion is achieved by either using high-voltage add-ons in the CMOS technology and/or high resistivity substrates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The increasing availability of these features in com- mercial CMOS processes could combine the features of the detector concept with possibly faster and cheaper production than common hybrid pixel detectors for the mentioned purposes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The idea behind and measurements results from one of multiple DMAPS prototypes, TJ-Monopix2 [2, 3], will be presented in the following.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Design of TJ-Monopix2 TJ-Monopix2 is the latest DMAPS prototype from the TJ-Monopix development line which is based on the ALPIDE pixel detector developed for the ALICE ITS upgrade [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' It is fabricated in the same 180 nm commercial CMOS process provided by Tower Semiconductor1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' A modification of the process used for ALPIDE has been implemented to increase the radiation tolerance to levels ≥ 1 × 1015 neq cm−2 by adding a low dose n-type implant for homogeneous growth of the depletion zone with applied bias voltage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' In measurements on first prototypes with this modification, a drop in hit detection efficiency was observed after irradiation [5, 6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' This could be improved significantly by adding a gap in the n-type blanket or a deep p-type implant in the pixel corners to shape the electrical field towards the collection electrode [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The cross-sections of these two sensor designs is shown in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Additionally, chips have been produced on Czochralski P+ SUBSTRATE P- EPITAXIAL LAYER COLLECTION N-WELL LOW DOSE N-TYPE IMPLANT DEEP PWELL PWELL PWELL NWELL DEEP PWELL PWELL PWELL NWELL (a) Modification with gap in low dose n-type implant be- low pixel electronics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' P+ SUBSTRATE P- EPITAXIAL LAYER COLLECTION N-WELL LOW DOSE N-TYPE IMPLANT DEEP PWELL PWELL PWELL NWELL DEEP PWELL PWELL PWELL NWELL EXTRA DEEP PWELL EXTRA DEEP PWELL (b) Modification with continuous n-type implant and deep p-type implant below pixel electronics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Figure 1: Cross-section variants of modified sensor process for TJ-Monopix2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' silicon to increase the available depletable volume compared to the thickness of the epitaxial layer (O(10 µm)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Measurements on Czochralski silicon chips in TJ-Monopix1 showed a further increase in hit detection efficiency after irradiation [8].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' TJ-Monopix2 follows a small collection electrode approach with a pixel capacitance of about 3 fF.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The pixels of size 33 × 33 µm2 are read out using an established synchronous column- drain technique from the FE-I3 readout chip [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Further changes from the predecessor TJ-Monopix1 1https://towersemi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='com 2 TJ-Monopix2: DMAPS in 180 nm CMOS technology Christian Bespin include an improved front-end design, a new pixel masking scheme and a 3-bit DAC for local threshold tuning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' With these changes the threshold is expected to be reduced by a factor of 3 while improving the threshold dispersion and noise behavior.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The digital periphery contains logic for register configuration, data handling and LVDS output drivers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Slow control is done via a command protocol and decoder that was taken from the RD53B readout chip [10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Both pixel and register data is 8b10b encoded in a frame-based data stream which allows operating the chip with four differential data lines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Injection-based threshold and noise measurements Initial tests have been performed in a laboratory setup to measure the threshold and noise performance of TJ-Monopix2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' All of these values are extracted from injecting different amounts of charge into the pixel a given number of times and recording the amount of registered hits 𝑛hits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The response function is a smeared step function of the form 𝑛hits(𝑞) = 1 2 · 𝑛injections · � erf �𝑞 − 𝑞thr 𝜎 √ 2 � + 1 � (1) with 𝑞 the injected charge amount, 𝑛injections the number of consecutive injections of 𝑞 and 𝑞thr the charge at the threshold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 𝜎 denotes the gaussian smearing of the step function which is defined as the electronic noise of the pixel.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The threshold is defined as the charge at which 50 % of the injected hits are recorded from the pixel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Histogramming the individual thresholds from each pixel leads to the distribution shown in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 2a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The distribution has a mean value of 164 e− and a width 50 100 150 200 250 300 Threshold / e 0 250 500 750 1000 1250 1500 1750 # of pixels Fit results: = 164.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='3 e = 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='0 e Threshold distribution for enabled pixels 1 2 3 4 5 6 7 TDAC (a) Threshold distribution before in-pixel threshold tuning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 50 100 150 200 250 300 Threshold / e 0 2000 4000 6000 8000 # of pixels Fit results: = 163.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='2 e = 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='7 e Threshold distribution for enabled pixels 1 2 3 4 5 6 7 TDAC (b) Threshold distribution after in-pixel threshold tuning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Figure 2: Threshold distribution of TJ-Monopix2 before and after adjusting the in-pixel threshold DAC to lower the dispersion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Color-coded is the value of the DAC setting for every pixel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' of 13 e− which is defined as the threshold dispersion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' By adjusting the threshold DAC in each pixel in order to even out the deviations from the target threshold, the threshold dispersion can be reduced significantly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The resulting distribution after this so-called tuning process is shown in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 2b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' While the mean threshold stays basically the same, the dispersion could be reduced by a factor of almost 5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Both the mean threshold and threshold dispersion are significantly lower than in TJ-Monopix1, where losses in hit detection efficiency could be observed due to too large thresholds [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 3 TJ-Monopix2: DMAPS in 180 nm CMOS technology Christian Bespin 0 5 10 15 20 25 30 35 ENC / e 0 100 200 300 400 500 600 700 800 # of pixels Fit results: = 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='8 e = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='5 e Noise distribution for enabled pixels (a) Noise distribution of TJ-Monopix1 with noticeable tail towards larger values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 2 4 6 8 10 ENC / e 0 200 400 600 800 1000 1200 1400 # of pixels Fit results: = 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='6 e = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='6 e Noise distribution for enabled pixels (b) Noise distribution of TJ-Monopix2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' There is no observ- able tail and lower noise overall.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Figure 3: Noise distribution of TJ-Monopix1 (left) and TJ-Monopix2 (right) for comparison.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The corresponding histogram of the electronic noise is depicted in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' As a comparison, the distribution from the predecessor TJ-Monopix1 is included, where a large tail towards higher values was observed that led to a high operational threshold in order to limit the amount of noisy pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' It can be seen, that this tail is largely removed with slight changes to the analog front-end, which in turn lowers the threshold for a regular operation of the chip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Hit detection efficiency measurements Two different pixel variations were investigated regarding their hit detection efficiency, that will be presented in the following – a DC-coupled, more standard design which makes up most part of the matrix and an AC-coupled investigative design realized in only a few columns of the matrix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' While the former was measured in more detail, some first results of the latter are included as well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='1 Standard DC-coupled pixel flavor First measurements to determine the hit detection efficiency have been performed in a 5 GeV electron beam at the DESY II testbeam facility at DESY, Hamburg [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Three unirradiated modules were tested with different sensor geometries: two chips with 30 µm thick epitaxial silicon and the two geometries depicted in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 1 as well as one chip built on 300 µm Czochralski silicon with a gap in the low dose n-type implant (see fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 1a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' It should be noted that the different substrate materials offer different sensor thicknesses and therefore charge-sensitive volume depending on the depletion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The measurements are not targeting a comparison between different types of silicon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Figures 4a and 4b show the recorded cluster charge for a chip with epitaxial layer and with Czochralski substrate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' It can be observed that the collected charge is about 25 % larger in the Cz sample, because the depletion depth is only limited by the thickness of the sensor (300 µm) which is by far not fully depleted, but more depleted than the 30 µm thick epitaxial layer in the other chip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The average cluster size is significantly larger in the Cz sample as well which results in a high spatial resolution due to charge-weighted clustering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The cluster size distributions for the same samples as above are depicted in figs.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 4c and 4d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' While cluster size 1 is predominant in the epitaxial 4 TJ-Monopix2: DMAPS in 180 nm CMOS technology Christian Bespin 2000 4000 6000 8000 10000 12000 Cluster charge / e 0 1000 2000 3000 4000 5000 # MPV charge: 2579 e Data (a) Cluster charge distribution for an epitaxial silicon chip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 2000 4000 6000 8000 10000 12000 Cluster charge / e 0 500 1000 1500 2000 2500 # MPV charge: 3235 e Data (b) Cluster charge for a Czochralski silicon chip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 1 2 3 4 5 6 Cluster size 0 20000 40000 60000 80000 100000 # Mean cluster size: 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='51 (c) Cluster size distribution for an epitaxial silicon chip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 1 2 3 4 5 6 Cluster size 0 5000 10000 15000 20000 25000 30000 35000 # Mean cluster size: 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='95 (d) Cluster size distribution for a Czochralski silicon chip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Figure 4: Cluster charge and size distributions for a chip with 30 µm epitaxial silicon (left) and 300 µm Czochralski silicon (right) at −6 V bias voltage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The latter can be depleted further than 30 µm resulting in a larger cluster charge and size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Both chips were operated at a threshold of 200 e−.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' sample, the Cz sample has mainly clusters of size 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The corresponding average cluster size is 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='55 and 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='95, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Taking the pointing resolution of the beam telescope into account, an intrinsic spatial resolution of 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='6 µm could be achieved in a Czochralski silicon sample.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The hit detection efficiency was measured with a beam telescope with six Mimosa26 planes and a FE-I4 time reference plane which are all connected to a trigger logic unit to synchronize individual detector hits time-wise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The efficiency for all three modules is shown in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 5 where the result for every pixel was mapped onto a two by two pixel cell to increase the statistics to see possible effects or efficiency losses within a single pixel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' All samples were running at a threshold of about 200 e− and achieve a hit detection efficiency around 99.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='80 % with slight deviations within the error (estimated < 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='1 %).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' There are no losses observable in the pixel corners or between pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='2 AC-coupled pixel flavor Another pixel variation with different analog front-end was tested as well to determine its performance in a particle beam.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' In this design the (positive) bias voltage is applied via a diode on the top side of the chip and connected to the charge collection n-well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' To avoid breakdown of the front-end electronics due to the high voltage (≤ 50 V) on that well, the input signal is AC coupled to 5 TJ-Monopix2: DMAPS in 180 nm CMOS technology Christian Bespin 0 10 20 30 40 50 60 column [ m] 0 10 20 30 40 50 60 row [ m] Region 1 (Center): In-pixel efficiency for DUT 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='00 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='25 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='50 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='75 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='00 96.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='25 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='50 98.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='75 100.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='00 (a) (99.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='80 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='10) % efficiency for a chip built on epitaxial silicon with gap in n-layer modification from fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 1a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 0 10 20 30 40 50 60 column [ m] 0 10 20 30 40 50 60 row [ m] Region 1 (Center): In-pixel efficiency for DUT 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='00 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='25 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='50 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='75 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='00 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='25 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='50 98.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='75 100.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='00 (b) (99.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='79 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='10) % efficiency for a chip built on Cz sili- con with gap in n-layer modification from fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 1a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 0 10 20 30 40 50 60 column [ m] 0 10 20 30 40 50 60 row [ m] Region 1 (Center): In-pixel efficiency for DUT 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='00 91.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='25 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='50 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='75 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='00 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='25 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='50 98.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='75 100.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='00 (c) (99.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='85 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='10) % efficiency for a chip built on epitaxial silicon with additional p-well modification from fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 1b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Figure 5: Hit detection efficiencies for different substrate materials with different thicknesses and sensor geometries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' Results were mapped onto a 2 x 2 pixel array for higher statistics and in-pixel resolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The chips were operated with −6 V bias voltage and at a 200 e− threshold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' the amplifier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' This approach can potentially deplete the substrate further due to the higher voltage than what can be applied in the standard pixel design.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' The hit detection efficiency was measured for different bias voltages and is shown in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content=' At 5 V the efficiency is already above 99 % and reaches the same value as for the DC coupled pixel flavor of 99.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/CtFRT4oBgHgl3EQfwTji/content/2301.13638v1.pdf'} +page_content='85 % at or before 25 V bias voltage.' 
This is, taking the slightly higher threshold into account, in agreement with the expectation that there should be no noticeable difference in hit detection efficiency before irradiation between the two pixel flavors. The larger applicable bias voltage could prove superior after irradiation to achieve more depletion and therefore a higher charge signal.
(a) (99.21 ± 0.10) % efficiency of an AC-coupled pixel flavor at 5 V bias voltage and 250 e− threshold.
(b) (99.85 ± 0.10) % efficiency of an AC-coupled pixel flavor at 25 V bias voltage and 200 e− threshold.
Figure 6: Hit detection efficiency of an AC-coupled pixel flavor at (a) 5 V and (b) 25 V bias voltage.
[Panels (a)-(b): in-pixel efficiency maps, "Region 1 (HV CASC): In-pixel efficiency for DUT"; axes: column [µm] vs. row [µm]; colour scale 90-100 %.]
5. Conclusion
In summary, the performance of TJ-Monopix2 shows a significant improvement in threshold value and dispersion compared to TJ-Monopix1, although the former is higher than its design value (120 e−). With the measured amount of charge in the sensor the threshold is still small enough to detect a majority of hits even from large clusters before irradiation. The large signal compared to the small pixel leads to a large cluster size and therefore high spatial resolution, where chips on Czochralski substrate perform slightly better due to the larger sensor volume. For the tested sensor materials with different thicknesses and sensor geometries, the hit detection efficiency is around 99.8 % or better in all cases. The modified front-end version with bias applied on the charge collection node achieves similar values for the hit detection efficiency while providing a larger headroom in bias voltage to achieve efficient performance after radiation damage. The results for irradiated modules will be presented in a forthcoming publication.
Acknowledgments
This project has received funding from the Deutsche Forschungsgemeinschaft DFG (grant WE 976/4-1), the German Federal Ministry of Education and Research BMBF (grant 05H15PDCA9), and the European Union's Horizon 2020 research and innovation program under grant agreements no. 675587 (Maria Sklodowska-Curie ITN STREAM), 654168 (AIDA-2020), and 101004761 (AIDAinnova). The measurements leading to these results have been performed at the Test Beam Facility at DESY Hamburg (Germany), a member of the Helmholtz Association (HGF).
diff --git a/DNE0T4oBgHgl3EQfygIs/content/tmp_files/2301.02659v1.pdf.txt b/DNE0T4oBgHgl3EQfygIs/content/tmp_files/2301.02659v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d8dfd4f0d5c36a40a80254bfa5b03de4b41a4760
--- /dev/null
+++ b/DNE0T4oBgHgl3EQfygIs/content/tmp_files/2301.02659v1.pdf.txt
@@ -0,0 +1,1969 @@
Bayesian Modelling of Visual Discrimination Learning in Mice
Pouya Baniasadi, PhD
Department of Physiology, Development and Neuroscience
UNIVERSITY OF CAMBRIDGE
August 2020
This project report is written in partial fulfilment of the requirement for the Master of Philosophy in Basic and Translational Neuroscience.
Supervised by
Dr. Jasper Poort (Selective Vision Laboratory, Department of Psychology)
Prof. Máté Lengyel (Computational and Biological Learning Laboratory, Department of Engineering)
arXiv:2301.02659v1 [q-bio.NC] 15 Nov 2022

Dedication
For my parents Mahin and Ghasem, who taught me about pursuing dreams, for their endless love, support and sacrifices.

Declaration
This report describes work carried out at Cambridge University from Jan 2020 to Jul 2020 under the supervision of Dr Jasper Poort (Selective Vision Laboratory at the Department of Psychology) and Prof. Máté Lengyel (Computational and Biological Learning Lab at the Department of Engineering) as a part of the MPhil program in Basic and Translational Neuroscience. I confirm that the material in this report is not copied from any published material, nor is it a paraphrase or abstract of any published material unless it is identified as such and a full source reference is given. I confirm that, other than where indicated above, this document is my own work.
Pouya Baniasadi
August 2020

Abstract
The brain constantly turns large flows of sensory information into selective representations of the environment. It, therefore, needs to learn to process those sensory inputs that are most relevant for behaviour. It is not well understood how learning changes neural circuits in visual and decision-making brain areas to adjust and improve its visually guided decision-making. To address this question, head-fixed mice were trained to move through virtual reality environments and learn visual discrimination while neural activity was recorded with two-photon calcium imaging. Previously, descriptive models of neuronal activity were fitted to the data, which was used to compare the activity of excitatory and different inhibitory cell types. However, the previous models did not take the internal representations and learning dynamics into account. Here, I present a framework to infer a model of internal representations that are used to generate the behaviour during the task. We model the learning process from untrained mice to trained mice within the normative framework of the ideal Bayesian observer and provide a Markov model for generating the movement and licking. The framework provides a space of models where a range of hypotheses about the internal representations could be compared for a given data set.

Contents
Declaration
Abstract
1 Introduction
  1.1 Mathematical preliminaries
2 The experiment
  2.1 Experimental setup
  2.2 Behavioral data and observations
3 Behavioral model part 1: internal representations
  3.1 Structure of spatial states
  3.2 Space of models for spatial states
  3.3 Bayesian learning model
    3.3.1 Learning reward probability within a state
    3.3.2 Learning state transitions
4 Behavioral model part 2: the generative model
  4.1 Spatial state parameter λ̃_k: licking rate
  4.2 Parameter ν̃_k: target speed within the current spatial state
  4.3 Generative model of licking and speed
  4.4 Estimation of model parameters
5 Discussion
  5.1 Limitations
  5.2 Implications
Bibliography

Chapter 1
Introduction
Learning modifies neural representations of behaviourally relevant information. Changes in response selectivity to behaviourally relevant stimuli have been observed in many studies across different species (Yang & Maunsell 2004, Yan et al. 2014, Poort et al. 2015), and there has been growing evidence that different cell types, classified using molecular and cellular properties (Kepecs & Fishell 2014), have specific roles in learning (Khan et al. 2018, Fishell & Kepecs 2019). However, the nature of these changes and how they relate to sensory coding is not well understood (Yap & Greenberg 2018).
Probabilistic models of behavioural learning are an important approach to link the changes in neural representations to internal representations of the environment and decision-making (Fiser et al. 2010, Berkes et al. 2011, Heeger 2017). Given the non-deterministic nature of events in the real world, human and animal learning must involve at least some internal representations of the uncertainties in the environment (Barlow et al. 1961). There has been an extensive body of research on how the nervous system represents uncertainty about the environment (Pouget et al. 2003, Beck et al. 2008, Fiser et al. 2010, Kriegeskorte & Douglas 2018).
Bayesian learning theory provides a normative framework of learning that represents uncertainty in probabilistic outcomes (Bishop 2006). In particular, the ideal observer analysis uses Bayesian learning theory for achieving optimal learning performance in a given task (Geisler 2003, 2011). Learning can be conceptualised as the incorporation of sensory information to update and improve performance on a given task. The ideal observer performs at the theoretical limits of information processing to update its beliefs. However, it is important to note that optimality in this context refers to the optimal incorporation of information, which is not equivalent to achieving the optimal solution in all trials. While the nervous system may or may not have representations similar to an ideal observer, the ideal observer analysis provides a systematic framework to formulate hypotheses about the internal representations and learning dynamics (Maloney & Mamassian 2009, Orbán et al. 2008).
In this thesis, we describe a Bayesian learning model using the framework of ideal observer learning. Our goal is to develop a model of internal representations of reward and space that are used for learning and adjusting behaviour in the visual discrimination task. This model will allow us in future work to relate the neuronal activity measurements (Poort et al. 2015, Khan et al. 2018) to the internal representations that guide behaviour. We continue this chapter with a brief overview of the basic mathematical ideas used to develop the model. Then, in Chapter 2, we explain the experimental setup and describe the behavioural data. A space of models (for the structure of Markov models) is introduced in Chapter 3, which defines the internal representations of reward and state transitions.
Then, a Bayesian model of learning reward probabilities and state transitions is described that uses the ideal observer framework. In Chapter 4, we introduce a generative Markov model that uses the internal representations to generate behaviour. We also discuss the use of maximum likelihood estimation to estimate the model parameters. Finally, in Chapter 5, we discuss the potential applications and limitations of the model and set out a path for the continuation of the research.

1.1 Mathematical preliminaries
In this section, I briefly introduce the concepts that provide the mathematical foundation of the behavioural model.

Markov chain model
A system has the Markov property if predictions about future events only require knowledge of the system's present state. In other words, given the present state of the system, future events are conditionally independent of past events. A Markov chain is a stochastic model of a sequence of events with the Markov property.
Let S = {s_1, s_2, ..., s_r} be a set of states for a Markov chain. The process starts in one of these states and moves sequentially from one state to another. Each move is called a step. Let X_n be the state at the current step. We denote by p_{ij} = P(X_{n+1} = s_j \mid X_n = s_i) the transition probability of visiting state s_j after visiting s_i. Note that, by the Markov property, given X_n, X_{n+1} is conditionally independent of the past states. A transition from s_i to s_j can be represented as a directed edge (s_i, s_j) with a corresponding transition probability p_{ij}. The transition probabilities of the outgoing edges from a state sum to 1. Figure 1.1 illustrates a Markov chain with 4 states and transition probabilities.
Figure 1.1: A Markov chain with four states and its transition probabilities.
Let T = [p_{ij}] be the transition probability matrix for the Markov chain and let u be the probability vector which represents the starting distribution (i.e., X_k \sim u). Then the probability that the chain is in state s_i after m steps is the i-th entry of the vector u^{(m)} := u T^m. That is,

    P(X_{k+m} = s_i \mid X_k \sim u) = u^{(m)}_i,  where  u^{(m)} = u T^m.     (1.1)

Bayesian learning
The probability of an event A is denoted by P(A). Consider another event B and its corresponding probability P(B). The conditional probability P(A \mid B) is the probability of A given B. Bayes' theorem states that

    P(A \mid B) = \frac{P(B \mid A)\, P(A)}{P(B)}.

Consider a system that generates data and a space of possible models for describing the behaviour of the system. The probability distribution over the space of models, P(Model), represents the prior knowledge about the system. Suppose that a set of data D is observed from the system. Then P(D \mid Model) is called the likelihood and P(D) is called the model evidence or marginal likelihood. The posterior distribution over the models, P(Model \mid D), represents our beliefs about the system after observing the data D. Bayes' rule provides a principled way of updating our beliefs about the system after observing data. Formally,

    P(Model \mid D) = \frac{P(D \mid Model)\, P(Model)}{P(D)}.     (1.2)
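To make Equation (1.1) concrete, the following minimal Python sketch propagates a starting distribution through a transition matrix with numpy. The four states and the particular transition probabilities are invented for illustration (they are not read off Figure 1.1), and the function name is mine.

```python
import numpy as np

# Illustrative 4-state transition matrix (rows sum to 1); values are made up.
T = np.array([
    [0.0, 0.7, 0.3, 0.0],   # from s1
    [0.0, 0.0, 0.6, 0.4],   # from s2
    [0.5, 0.0, 0.0, 0.5],   # from s3
    [1.0, 0.0, 0.0, 0.0],   # from s4
])

u = np.array([1.0, 0.0, 0.0, 0.0])   # start in s1 with probability 1

def state_distribution(u, T, m):
    """Return u^(m) = u T^m, the distribution over states after m steps (Eq. 1.1)."""
    return u @ np.linalg.matrix_power(T, m)

print(state_distribution(u, T, 3))   # probability of being in each state after 3 steps
```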
Dirichlet distribution and learning of categorical probability values
Consider a random variable which can take on K possible categories. The categorical distribution is a discrete probability distribution for such a random variable, where the probability of each category is separately specified. The categorical distribution is a generalisation of the Bernoulli distribution to a discrete variable with more than two outcomes, such as the probability of outcomes for a 6-sided die. It is also a special case of the multinomial distribution where the number of trials is one.
If the probabilities of each outcome of a categorical distribution are unknown, we can use Bayesian learning to update prior probability distributions over the probability values. The Dirichlet distribution is a conjugate prior for the multinomial (and categorical) distribution, meaning that starting with a Dirichlet prior and a multinomial likelihood, the resulting posterior is also a Dirichlet distribution. The probability density function for the Dirichlet distribution Dir_K(\alpha) with K categories is

    f(p \mid \alpha) = \frac{1}{B(\alpha)} \prod_{i=1}^{K} p_i^{\alpha^{(i)} - 1},     (1.3)

where \alpha = (\alpha^{(1)}, ..., \alpha^{(K)}) is the vector of parameters. Furthermore,

    B(\alpha) = \frac{\prod_{i=1}^{K} \Gamma(\alpha^{(i)})}{\Gamma\!\left(\sum_{i=1}^{K} \alpha^{(i)}\right)},

where, for a positive real number n,

    \Gamma(n) = \int_0^{\infty} x^{n-1} e^{-x}\, dx.

For positive integer values, \Gamma(n) = (n-1)!.
To learn the probabilities of a categorical distribution, given a prior distribution Dir_K(\alpha) over the probability vector p = (p_1, ..., p_K), and data c = (c_1, ..., c_K) representing the number of observations of each category, the posterior distribution is

    p \mid c \sim Dir_K(c + \alpha).     (1.4)

Finally, the Beta distribution is a special case of the Dirichlet distribution where the outcomes are binary (true or false). To distinguish this special case, we may use the notation Beta(\beta^{(1)}, \beta^{(2)}) \equiv Dir_2(\alpha) where \alpha = (\beta^{(1)}, \beta^{(2)}).
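The conjugate update of Equation (1.4) amounts to adding the observed counts to the prior parameters. The sketch below shows this, together with the Beta special case that is used later for reward probabilities; the prior and the counts are made-up numbers.

```python
import numpy as np

def dirichlet_posterior(alpha_prior, counts):
    """Posterior parameters of Dir_K(alpha) after observing category counts c (Eq. 1.4)."""
    return np.asarray(alpha_prior, dtype=float) + np.asarray(counts, dtype=float)

# Beta special case (K = 2): e.g. reward / no-reward outcomes in one state.
alpha = np.array([1.0, 1.0])      # uniform Beta(1, 1) prior
counts = np.array([3, 1])         # 3 rewarded licks, 1 unrewarded lick (made-up data)
post = dirichlet_posterior(alpha, counts)

post_mean = post / post.sum()     # posterior mean of the probability vector
print(post, post_mean)            # Beta(4, 2); mean reward probability = 4/6
```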
Chapter 2
The experiment
In this chapter, I describe the experimental setup in Khan et al. (2018) and Poort et al. (2015), for which we have developed a behavioural model in the later chapters. A summary of previous findings and a description of the behavioural data, accompanied by figures, are also included.

2.1 Experimental setup
The experimental setup involves the placement of the mouse on a cylindrical treadmill where its head is fixed to enable imaging of neural activity. The mouse can move forward (and backward). In front of the mouse, a screen is shown to the animal where visual feedback connected to the movement can simulate the movement of the subject in an environment. By controlling the setup of the space and the visual stimulus while allowing imaging, the VR setup has been extensively used for studying the visual cortex and hippocampus in mice in recent years (Harvey et al. 2009, Dombeck et al. 2010, Khan et al. 2018, Poort et al. 2015, Saleem et al. 2018). Figure 2.1 illustrates the VR setup.
Figure 2.1: Movement in a corridor simulated in the VR environment.

Specifics of the corridor space and reward administration
We specifically consider the experimental setup described in Khan et al. (2018) and Poort et al. (2015). In these two studies, the activity of populations of neurons in V1 was measured with two-photon calcium imaging (Chen et al. 2013) during a visual discrimination task in a virtual reality (VR) environment. Head-fixed mice ran through a simulated corridor where different types of visual stimuli were displayed on the walls. Three types of wall patterns characterise the different corridors. In the grey corridor, a short stretch of circle patterns is followed by grey walls for a random distance, before the pattern on the walls abruptly changes to one of the grating corridors. The grating corridors displayed either vertical gratings (illustrated in Figure 2.1) or angled gratings for a fixed length (60 VR length units), before returning to the grey corridor. An illustration of the corridor space is displayed in Figure 2.2.
Figure 2.2: Illustration of the corridor space.
A milk dispenser was placed in front of the mouse to administer rewards. Mice received a reward for licking the dispenser in a reward zone starting halfway into the vertical grating corridor and extending for around 10 VR length units. If the mouse licked the dispenser in the reward zone, it would trigger the opening of the reward valve and a drop of soy milk would appear at the dispenser. No punishment was given for licking in the corridors with grey and angled grating walls. All mice learnt to discriminate the two stimuli, starting at chance performance (behavioural d′ close to zero) and reaching the threshold criterion of d′ > 2.0 within 5-9 days.

Summary of previous findings
The motivation behind developing a behavioural model is to take advantage of the behavioural data for the future analysis of experiments similar to Khan et al. (2018). A summary of the results in Khan et al. (2018) is as follows. After learning the visual discrimination task, neurons showed increased stimulus selectivity for the angled and vertical gratings. Interestingly, this effect depended on the cell type. In particular, stimulus selectivity was compared for populations of pyramidal cells (PYR) along with parvalbumin (PV), somatostatin (SOM), and vasoactive intestinal peptide-expressing (VIP) inhibitory interneurons in layer 2/3 (L2/3) of the primary visual cortex (V1). Selectivity was increased for PYR and PV cells. PV neurons became as selective as the PYR cells, and showed changes in functional interactions, particularly with PYR cells. On the other hand, SOM neurons became decorrelated from the network, and PYR-SOM coupling before learning predicted selectivity increases in individual PYR cells. While SOM inhibition seemed to gate changes in selectivity, PV cells provided strong stimulus-selective inhibition after learning. A multivariate autoregressive linear model (MVAR model) was fitted to the activity of the neurons, and further supported the results of the statistical analysis. However, the MVAR model arguably neglects potentially important information in the behavioural data. Even though speed is taken into account, its contribution to the behaviour of the MVAR model is negligible. Accordingly, one of the primary motivations of the behavioural model proposed in this report is to enable potential improvements of the MVAR model. This is discussed in more detail in Chapter 5.

2.2 Behavioral data and observations
Behavioural data were collected during the experiment. The distance travelled from the onset of the current corridor and the corridor type (determined by the wall patterns) are continuously recorded. The observed variables of spatial location and visual stimuli at each time are marked by a pair (x, y) ∈ xLoc × Cor, where x is the distance travelled from the onset of the current corridor pattern, and y is the corridor pattern. The set xLoc = [0, max(x)] is an interval from 0 to the maximal corridor length max(x), and the set Cor = {grey, vertical, angled} is the set of corridor types. The speed of the subject at each time is also recorded. A list of licking times and valve opening times (indicating reward administration) is also given by the data.
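Section 2.1 quotes a training criterion of behavioural d′ > 2.0. For reference, the sketch below computes d′ with the standard signal-detection formula d′ = z(hit rate) − z(false-alarm rate). How hits and false alarms were defined in the original studies is not stated here, so the example rates (licking in the reward zone of the vertical-grating corridor versus the corresponding region of the angled-grating corridor) are an assumption.

```python
from scipy.stats import norm

def d_prime(hit_rate, fa_rate, eps=1e-3):
    """Behavioural discriminability d' = z(hit rate) - z(false-alarm rate).

    Rates are clipped away from 0 and 1 so the z-transform stays finite."""
    hit = min(max(hit_rate, eps), 1 - eps)
    fa = min(max(fa_rate, eps), 1 - eps)
    return norm.ppf(hit) - norm.ppf(fa)

# Hypothetical session: licking on 90% of vertical-grating trials and 15% of
# angled-grating trials.
print(d_prime(0.90, 0.15))   # roughly 2.3, above the d' > 2.0 training criterion
```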
For the generative behavioural model in Chapter 4, we discretize the data into time intervals of ∆τ seconds, each identified by an index t ∈ {1, 2, ..., N}. The value of ∆τ determines the time resolution of the behavioural data. Since the imaging data of Khan et al. (2018) were taken at 1/8 second intervals, time bins finer than 1/8 seconds are not useful. Coarser time bins (larger ∆τ) may be desirable because they decrease the computational cost of the analysis, but the cost of losing time resolution must be weighed. However, unless explicitly discussed, we can assume ∆τ = 1/8 for the data analysis. Table 2.1 describes the notation used for the data. Note that some of the records are behavioural, while others specify the values that are observed by the subject.

Table 2.1: Behavioural and observational records for t ∈ {1, 2, ..., N}.
  x_t  (Observation)  The true value of the distance from the onset of the current corridor at time step t.
  y_t  (Observation)  y_t ∈ Cor = {grey, vertical, angled} is the true value of the corridor type, which determines the visual stimuli at time step t.
  o_t  (Observation)  A binary value for whether the reward valve has opened during the time step.
  v_t  (Behaviour)    Speed (average) at time step t.
  l_t  (Behaviour)    Number of licks at time step t.
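As a sketch of the discretisation just described, the function below bins raw logs into the per-time-step records of Table 2.1 with ∆τ = 1/8 s bins. The function and argument names are mine, and it assumes the continuous signals (time, position, corridor code, speed) are sampled at least as fast as ∆τ, so that every bin contains at least one sample.

```python
import numpy as np

def bin_records(t, x, y_code, speed, lick_times, valve_times, dt=0.125):
    """Discretise continuous logs into per-time-step records as in Table 2.1.

    t, x, y_code, speed     : equally long arrays of sample time, position,
                              corridor code and speed (sampled at least as fast as dt).
    lick_times, valve_times : event times of licks and reward-valve openings.
    Returns x_t, y_t, v_t (per-bin values), l_t (lick counts) and o_t (valve opened)."""
    t, x, y_code, speed = map(np.asarray, (t, x, y_code, speed))
    edges = np.arange(t[0], t[-1] + dt, dt)
    n = len(edges) - 1
    idx = np.clip(np.digitize(t, edges) - 1, 0, n - 1)       # bin index of every sample

    x_t = np.array([x[idx == i].mean() for i in range(n)])
    v_t = np.array([speed[idx == i].mean() for i in range(n)])
    y_t = np.array([y_code[idx == i][0] for i in range(n)])  # corridor code at bin start
    l_t = np.histogram(lick_times, bins=edges)[0]            # licks per bin
    o_t = np.histogram(valve_times, bins=edges)[0] > 0       # valve opened in bin?
    return x_t, y_t, v_t, l_t, o_t
```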
Instances of data visualisations
The figures below are instances of behavioural data visualisations from the experimental data. Figures 2.3 and 2.4 (Poort et al. 2015) illustrate the licking behaviour at different positions in the different corridors, and Figures 2.5 (Poort et al. 2015) and 2.6 (Khan et al. 2018) give a colour map of speed at the different positions in the different corridors. For all figures, the horizontal axis represents the position with respect to the onset of the grating corridor (for the grey corridor this is obtained by shifting x_t by the length of the grey corridor), and the vertical axis is the trial index. Higher trial numbers are later. The black or red labels are data labels associated with the experimental sessions.
The following observations about the licking behaviour have influenced the parameter definitions and the assumptions about prior beliefs of the animal in Chapter 3. These observations are consistent among all subjects.
Reward association prior: The mice do not know the reward associations before experiencing the reward. However, the mice know that moving forward and licking the dispenser may lead to a reward. Initially, the licking behaviour is frequent, to explore the space and discover reward associations. A uniformly random prior for the reward probability may be appropriate.
Change of visual discrimination: The behaviour of the mice in the grating area and the grey area starts to diverge immediately, whereas the behaviour of the mouse in the angled and vertical grating corridors seems to be similar at first; differences in licking behaviour seem to emerge only after the reward is experienced in the vertical grating corridor. The dissociation of the reward from the angled grating is realised substantially later than the dissociation of the reward from the grey area. It seems that at different points in training, the set of visually discriminated stimuli is different.
Location is also taken into account: As learning progresses, the licking concentrates close to the reward zone. It seems that the mice associate a spatial region, characterised by both visual stimuli and spatial positioning, with the reward area.
The following observations about the speed have influenced our generative model of speed in Section 4.3. These observations are consistent among all subjects.
Reward association influences speed: The graphs suggest that the dissociation of reward from upcoming regions is associated with higher speed, while anticipation of reward in upcoming regions is associated with a reduction of speed.
Evidence for change in the internal model: While the speed behaviour in the grey corridor diverges from that in the grating corridors quickly, the divergence of the speed behaviour between the angled and vertical gratings happens at a later point. This suggests that the mice initially correlate the grating areas with the reward, and then learn to differentiate between the grating areas to dissociate the angled grating from the reward.
Change of visual discrimination: Similar to the licking behaviour, the speed behaviour initially seems to discriminate between the angled and vertical gratings only after the reward is experienced in the vertical grating corridor. This suggests that the mice initially correlate the grating areas with reward, and then learn to discriminate between the vertical and angled grating areas.
Figure 2.3: Lick locations for M27. See the figure description below (Figure 2.4).
Figure 2.4: Lick locations for M31 in all trials. The horizontal axis represents the location in a corridor, with 0 set at the onset of a grating corridor. Negative values are in the grey corridors and positive values are in the grating corridors. The licking locations are marked by coloured points: red dots represent licking within 1 length unit before a valve opening, and yellow indicates licking after the opening of the reward valve, in a grating corridor. All other lick locations are marked in black. The trial number on the vertical axis shows the sequential order of the trials in each plot. The right plot shows all trials, where each trial is a passage through one grey corridor followed by a grating corridor. The middle and left plots show a closer look at the vertical and angled grating corridors. The red labels are labels for the experimental sessions.
[Lick-raster panels for M27 and M31: panels for grey walls, vertical gratings and angled gratings; x axis: x (VR length unit); y axis: trial number; legend: unrewarded lick, lick after valve opens, lick within 1 unit before valve opens.]
Figure 2.5: Speed vs. location for M31. See the figure description below (Figure 2.6).
Figure 2.6: Speed and licks vs. location for M70. The horizontal axis represents the location in the corridor, with 0 set at the onset of a grating corridor. Negative values are in the grey corridors and positive values are in the grating corridors. The trial number on the vertical axis shows the sequential order of the trials in each plot. The right plot shows all trials, where each trial is a passage through one grey corridor followed by a grating corridor. The middle and left plots show a closer look at the vertical and angled grating corridors. The colour at each location of each trial represents the speed of the animal at that point according to the colour scale; warmer colours represent higher speeds and cooler colours represent lower speeds. Note that for Figure 2.5, the speed is averaged over 5-unit intervals due to virtual memory limits. The white points show the lick locations for M70, and the small black star indicates a valve opening location during a trial. The black labels are data labels associated with experimental sessions.
[Speed colour maps for M31 and M70: colour scale speed (units per second), 0-50; x axis: x (VR length unit); y axis: trial number; panels for grey walls, vertical gratings and angled gratings.]

Chapter 3
Behavioral model part 1: internal representations
The behavioural model presented here provides a framework for inferring an internal model that can predict the animal's behaviour at a given time. Before getting into the specifics, consider a broad perspective on inferring a model that generates the current behaviour by incorporating past experiences. Figure 3.1 is a graphical model of the big-picture relation between the history of the animal's observations H, the internal model M that incorporates experience into internal representations, and the observed behaviour B. This chapter discusses the relationship between the history of observations and behaviorally relevant representations in the internal model (H → M in the graphical model of Figure 3.1). I introduce a space of models where a range of hypotheses about the internal model can be systematically examined. The internal representations about reward and space are then used in the next chapter to construct a generative model of behaviour (M → B in the graphical model of Figure 3.1). Then, using a systematic approach, an internal model is inferred that best describes the data (H and B).
Figure 3.1: Relation between the history of experimental observations H, the internal model M, and the behaviour B. H and B are observed in the experimental data, but the internal model M is unobserved.
The space of models in Section +3.2 prescribes a range of Markov chain structures of spatial states within which a model is +selected. For given states of a model, the dynamics for learning reward associations and +state transitions are considered within the normative framework of the Bayesian ideal +observer model in Section 3.3. +3.1 +Structure of spatial states +Animals’ observation of visual stimuli and spatial positioning is an observation of the +current (x, y) ∈ {xLoc, Cor}. +Observations about reward association at the current +location (x, y) may be relevant to reward association at some other locations. +It is +therefore necessary to define spatial regions where reward observations are relevant to +the entire region but explicitly irrelevant to other regions. To formalise this concept, +the objective of this section is to associate the segments of space with states where the +information about reward association is relevant to the current state and no other state. +A reasonable way to define such states is to group areas that are spatially close by, visually +similar, or both. +Defining states associated with spatial segments +Taking into account both spatial proximity and visual similarity, consider sectioning xLoc +into a finite set of mutually exclusive spatial segments each identified by a fixed y, and an +interval Ix for x values. We illustrate an example of spatial segmentation in Figure 3.2. +Denote by S a set of states and associate each segment with only one state (note that +multiple segments may be associated with the same state). Then we say that the mouse +is in state s if its position (x, y) is inside a segment that is associated with s. We associate +all positions in all corridors with only one state with the function f : (xLoc × Cor) → S. +1The subject can only move forward due to the experimental setup. +14 + +3.1. +Structure of spatial states +The mouse may map locations onto states in multiple ways. By considering various ways +to map between locations and states, we can infer the mapping that best matches the +behavioural data (see 4.4). +Spatial state transition event and structural properties +Let Xk be the random variable describing the k-th visited spatial state, where a spatial +state transition event (i.e., transition to the next spatial step) happens when the subject +crosses the initial point of a segment associated with a state2. Given the current position, +the future positions do not depend on the history of visited positions, so given Xk, state +Xk+1 is conditionally independent of Xn for n < k. It follows that the state structure as +defined above satisfies the Markov property. +We assume that the spatial states are fully observable. In other words, given a state +structure, we assume that the subject always knows which state is the current state. +Observations of the animal may be noisy and inaccurate, so assuming fully observable +states is a simplification that may be contended with in a more sophisticated future +model. However, states are associated with intervals of space rather than precise points +in space, and they already incorporate some approximation about the spatial awareness +of the subject. +We assume that the mouse learns two things from the visual stimuli and licking in state +s. First, it learns the reward association in that state. Second, it learns the transition +from that state to other states. Let r(s) be the probability that licking in state s leads to +reward in state s. 
We assume that the mouse learns two things from the visual stimuli and licking in state s. First, it learns the reward association in that state. Second, it learns the transitions from that state to other states. Let r(s) be the probability that licking in state s leads to reward in state s. Also, denote by p(s,s') = P(X_{k+1} = s' \mid X_k = s) the transition probability of visiting state s' after s. These parameters are initially unknown to the mouse and should be learned. In Section 3.3, I discuss a semi-normative model of learning for these parameters using the ideal observer framework.
Figure 3.2: An example of dividing the corridor space into mutually exclusive spatial segments. Each segment is then associated with exactly one state.
It is worth noting that the state transitions of the Markov chain are sparse. To understand the sparsity of the state transitions, first note that x is a non-negative real value, which ranges from 0 to the maximal length of a corridor with the same pattern, and y is a discrete value with three possible entries. From the onset of a corridor until the onset of the next corridor, the spatial location is a continuous function of time. Within the period between two consecutive onsets, if a state transition happens, it can only be to the state associated with the next interval of x, with the same y. Moreover, when passing the onset of the next corridor, there is a discrete change in the value of y, and x = 0 at the onset of the new corridor. This event can only be a state transition to the start of a new corridor (a state that starts at x = 0), so there are at most three such possible transitions. It follows that the structure of states is a sparse Markov chain.

3.2 Space of models for spatial states
To define a space of models M, we use two parameters for identifying a model in the model space: one for the set of discriminated patterns (V), and one for the length of the segments (d).

Spatial model parameter V: set of discriminated visual stimuli
Let V be the set of visual stimuli that are discriminated in the spatial state model. The set of possible choices for V is {V1, V2, V3}, which are described below.
• V1 = {u := undifferentiated}, where the grey and grating corridors are not discriminated.
• V2 = {g := grey, va := angled or vertical grating}, where the grey corridor is discriminated from the grating corridors, but the angled and vertical grating corridors are not discriminated from each other.
• V3 = {g := grey, v := vertical, a := angled}, where the grey corridor, the angled grating corridor and the vertical grating corridor are all discriminated.
While the set Cor contains the types of visual stimuli on the corridors, the set V refers to the subjective visual discrimination (or classification) between corridors by the mouse. Also note that the choices for the set V implicitly contain a mapping from Cor to V.
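The implicit mapping from Cor to V can be written out explicitly. The dictionary below is a minimal sketch of the three choices V1, V2 and V3; the string labels are mine.

```python
# The three candidate discrimination sets of Section 3.2, written as mappings from the
# true corridor type (set Cor) to the subjectively discriminated label (set V).
COR_TO_V = {
    "V1": {"grey": "u",  "vertical": "u",  "angled": "u"},
    "V2": {"grey": "g",  "vertical": "va", "angled": "va"},
    "V3": {"grey": "g",  "vertical": "v",  "angled": "a"},
}

def discriminated_label(corridor, V="V3"):
    """Map an observed corridor type onto the label the model assumes the mouse perceives."""
    return COR_TO_V[V][corridor]
```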
Figure 3.3: Nine instances of Markov chain models M_{V,d} for the three choices of V and selected values of d. For d = max(x) there is only one state per element of V, and a self-transition event occurs only when the corridor type changes. The length of the angled and vertical grating corridors is exactly 60 (VR length units) in the experiment, so for d = 60 and d = 20 there are exactly 1 and 3 states, respectively, associated with the relevant element of V. Note that the figure illustrates only selected instances of the model space M.

Table 3.1: Parameters for the model of spatial states.
Parameter | Type | Description
V | spatial model parameter | the set of discriminated visual stimuli on the corridors in the model M_{V,d}; possible options are V1 = {u}, V2 = {g, va} and V3 = {g, v, a}.
d | spatial model parameter | a constant length in (0, max(x)] for the spatial segments of the model M_{V,d}.

Parameters V and d are free parameters that will be set during model selection, which is discussed further in Section 4.4. The fit for parameter V, selected from V1, V2 or V3, is determined by which stimuli the animal discriminates. The true value of d is the length of spatial segments for which information about reward associations and state transitions in the current segment is reasonably independent of the segments associated with other states. For the sake of simplicity, d is assumed to be a fixed value that is the same across different visual stimuli. This assumption can, however, be relaxed by introducing more free parameters, for example one distance parameter per element of V. For instance, suppose V = V3; then, instead of a single free parameter d, we could use the three parameters D = {d_g, d_a, d_v}, one for each element of V. In the initial implementation of the model, a single parameter d is used.

In summary, the parameters V and d of a model M_{V,d} determine the structure of the states in the Markov chain, where for each state the learning dynamics of reward associations and state transitions depend only on the observations in that state. These learning dynamics are discussed in the next section.

3.3 Bayesian learning model

As noted in Section 3.1, in any state s the subject uses sensory information to learn r(s), the probability that licking in s leads to the administration of reward in s, or the reward probability of s for short. Furthermore, the state transition probability p(s,s′), the probability of visiting state s′ after visiting s, is also unknown to the subject and must be learned. Here, we use the ideal observer framework (Geisler 2003) to develop a semi-normative model for learning both the reward associations and the state transitions. In this section, the learning dynamics are discussed for a given model M ∈ M; the states S and their associated spatial intervals are therefore unambiguous.
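Before turning to the learning dynamics, the model space itself is small enough to enumerate explicitly. The sketch below lists the example models of Figure 3.3 together with their state counts; the grey-corridor length of 60 VR units is an assumption made only for this illustration, and the names are hypothetical.

import math

GRATING_LENGTH = 60          # stated in the text (VR length units)
GREY_LENGTH = 60             # assumption for illustration only

LABEL_LENGTHS = {
    "V1": {"u": max(GREY_LENGTH, GRATING_LENGTH)},
    "V2": {"g": GREY_LENGTH, "va": GRATING_LENGTH},
    "V3": {"g": GREY_LENGTH, "v": GRATING_LENGTH, "a": GRATING_LENGTH},
}

for name, lengths in LABEL_LENGTHS.items():
    for d in (60, 20):
        n_states = sum(math.ceil(L / d) for L in lengths.values())
        print(f"M_({name}, d={d}): {n_states} states")
# With V = V3 and d = 20 this gives 9 states, i.e. 3 segments per discriminated corridor.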
3.3.1 Learning reward probability within a state

Recall that reward is delivered immediately after the subject licks the dispenser in the reward zone (see Section 2.1 for details of the experimental setup); the reward is a fixed amount of milk administered via the dispenser. We noticed that, even in trained animals, licking started before the reward zone (see the example mice in Figures 2.3 and 2.4). This suggests that the mouse associates reward delivery with an extended region that starts before the reward zone set by the experimenters.

Reward outcome R_k of the current spatial step k

If the mouse licks the dispenser in state s, it collects some information about the unknown parameter r(s). If it does not lick the dispenser, it obtains no information about r(s). Let the random variable R_k = (R_k^(T), R_k^(F)) be the reward outcome of spatial step k, where R_k^(T) counts the number of positive outcomes and R_k^(F) counts the number of negative outcomes in spatial step k. As a consequence of the experimental setup, the amount of reward and the frequency of licking do not provide any additional information about a reward region. Furthermore, spatial states are defined to be regions where licking at different points within the region does not provide additional information about the reward. Therefore, each visit to a state yields only three possible reward outcomes:

• R_k = (1, 0) if the subject licks the dispenser in spatial step k and reward becomes available in spatial step k,
• R_k = (0, 1) if the subject licks the dispenser in spatial step k and no reward follows in spatial step k, and
• R_k = (0, 0) if the subject does not lick in spatial step k.

Normative (Bayesian) model for updating internal reward representations

Let us first discuss how an ideal observer updates its prior beliefs about r(s) after visiting state s in spatial step k. The ideal observer provides a theoretical upper limit of performance given the collected data, and is therefore a normative framework for updating beliefs about reward associations. Let the prior beliefs about r(s) right before visiting spatial step k be a Beta distribution over the interval [0, 1],

    Beta(β_k^(1)(s), β_k^(2)(s)).

The reward outcome R_k = (R_k^(T), R_k^(F)) is the newly collected data about the reward. By Equation 1.4, the posterior is

    r(s) | R_k ~ Beta(R_k^(T) + β_k^(1)(s), R_k^(F) + β_k^(2)(s)).

Reward learning rate η_r

The above is a theoretical bound on learning from observations in state s, assuming a prior Beta distribution over [0, 1] for the reward probability r(s). Some mice learn faster than others, and all of them will perform no better than the ideal observer model above. To allow for individual differences in learning rate, we introduce a model parameter η_r ∈ [0, 1], which scales the amount of data required for the same amount of learning as an ideal observer. The update rule (i.e., the posterior) becomes

    r(s) | R_k ~ Beta(η_r R_k^(T) + β_k^(1)(s), η_r R_k^(F) + β_k^(2)(s)).

To keep track of the learning parameters, let B_k = {β_k(s) := (β_k^(1)(s), β_k^(2)(s)) : s ∈ S} be the Beta parameters of the beliefs about the reward probabilities of all states at spatial step k. After visiting state s = X_k in spatial step k,

    β_{k+1}(s) = η_r R_k + β_k(s)      for s = X_k, and                          (3.1)
    β_{k+1}(s′) = β_k(s′)              for s′ ≠ X_k.

Note that η_r is defined to have the same value across all states.
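A minimal Python sketch of the update rule in Equation 3.1 is given below; the container for B_k and the function name are illustrative assumptions.

def update_reward_beliefs(B, s, R, eta_r):
    """Equation 3.1: only the visited state s = X_k is updated.
    B maps each state to its Beta parameters (beta1, beta2); R = (R_T, R_F)."""
    R_T, R_F = R
    b1, b2 = B[s]
    B[s] = (eta_r * R_T + b1, eta_r * R_F + b2)
    return B

# Example: with eta_r = 0.5, a rewarded lick moves a uniform prior Beta(1, 1)
# for the visited state to Beta(1.5, 1); all other states are unchanged.
B = {("a", 3): (1.0, 1.0), ("a", 2): (1.0, 1.0)}
update_reward_beliefs(B, ("a", 3), (1, 0), eta_r=0.5)
assert B[("a", 3)] == (1.5, 1.0) and B[("a", 2)] == (1.0, 1.0)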
If η_r = 1, the mouse performs as well as the normative ideal observer, and if η_r = 0, the mouse never learns reward associations. For values between 0 and 1, the mouse requires additional data points to update its beliefs to the same extent as an ideal observer. The model parameter η_r can therefore be interpreted as the data efficiency of learning. It could be used to compare individual learning differences among subjects, and it would be interesting to assess whether differences in η_r across individuals are predictive of comparative learning rates on other learning tasks. It also provides a qualitative way to assess the model: for example, if the fitted value is unreasonably high, it may indicate a flaw in the state structure or an incorrect choice of prior.

Table 3.2: Guide to variables (Var) and parameters (Par) relevant to internal reward representations.
Var/Par | Type | Description
R_k | observed | a binary pair representing the reward outcome of step k: (1, 0) lick and reward within step k; (0, 1) lick but no reward within step k; (0, 0) no lick within step k.
B_k | inferred | the list of (β_k^(1)(s), β_k^(2)(s)) for all s ∈ S, where Beta(β_k^(1)(s), β_k^(2)(s)) represents the beliefs about r(s) at spatial step k.
η_r | model parameter | a constant in the interval [0, 1]; the learning rate for reward associations.

Implementation notes

To simplify the implementation, the posterior distribution at step k can be derived by merely keeping a record of the total counts of positive and negative reward outcomes in each state. In particular, at step k, for state s, let c_k(s) = (c_k^(T)(s), c_k^(F)(s)) be the total counts of positive and negative outcomes in state s from step 1 up to the start of step k, that is,

    c_k(s) = Σ_{n=1, X_n=s}^{k} R_n.

For the current spatial step k, a list of numbers can store the values of c_k(s). Assuming a uniform prior at the start of the experiment, i.e., β_1^(1)(s) = β_1^(2)(s) = 1, the prior probability distribution of r(s) at step k is

    r(s) ~ Beta(η_r c_k^(T)(s) + 1, η_r c_k^(F)(s) + 1),

for which

    β_k(s) = η_r c_k(s) + 1.                                                     (3.2)

3.3.2 Learning state transitions

The learning dynamics for the state transitions p(s,s′) are defined similarly to those for the reward associations. Let E be the set of (directed) transition edges, and let Adj(s) = {s′ : (s, s′) ∈ E} be the set of states such that, for X_k = s, the outcome of X_{k+1} lies in Adj(s). The transition probabilities from s, P(X_{k+1} | X_k = s), therefore form a distribution over Adj(s). Assuming fixed transition probabilities, P(X_{k+1} | X_k = s) can be represented by a list of probabilities p(s) := (p(s,s′) : s′ ∈ Adj(s)). Note that if the subject is not familiar with the environment, the true distribution is unknown, and the subject learns about these probabilities through experience.

Normative (Bayesian) model for updating internal transition representations

Every time the subject leaves state s and the next step is observed, one observation is made about the outcome of X_{k+1} given X_k = s. Because the outcome is a multinomial random variable whose possible outcomes are the states in Adj(s), we use a Dirichlet prior distribution to represent the uncertainty about p(s). Specifically, at spatial step k,

    p(s) ~ Dir(α_k(s)),

where the list of parameters α_k(s) contains one element per possible outcome; in particular,

    α_k(s) = (α_k(s, s′) : s′ ∈ Adj(s)).
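The count-based bookkeeping of Equation 3.2, together with the Dirichlet representation just introduced, can be sketched as follows. The data structures and the uniform prior value of 1 mirror the implementation notes; everything else (names, signatures) is an illustrative assumption, and the posterior-mean readout simply uses the standard Dirichlet mean.

def reward_beta_params(counts, s, eta_r):
    """Equation 3.2: Beta parameters from the running counts
    counts[s] = (c_T, c_F), assuming a uniform Beta(1, 1) prior."""
    c_T, c_F = counts[s]
    return (eta_r * c_T + 1.0, eta_r * c_F + 1.0)

def init_transition_beliefs(adj):
    """Uniform Dirichlet prior over Adj(s): alpha_1(s, s') = 1 for every edge."""
    return {s: {s2: 1.0 for s2 in neighbours} for s, neighbours in adj.items()}

def mean_transition_probs(alpha, s):
    """Posterior-mean estimate of p(s) = (p(s, s') : s' in Adj(s))."""
    total = sum(alpha[s].values())
    return {s2: a / total for s2, a in alpha[s].items()}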
Suppose X_k = s, and consider an ideal observer whose prior beliefs about p(s) at spatial step k are described by Dir(α_k(s)). Suppose further that the ideal observer visits the next state and makes the observation X_{k+1} = ŝ. Then, by Equation 1.4, the posterior distribution is

    p(s) | (X_{k+1} = ŝ, X_k = s) ~ Dir(α_{k+1}(s)),

where each element α_k(s, s′) of α_k(s) is updated as follows:

    α_{k+1}(s, s′) = 1 + α_k(s, s′)    for s′ = ŝ, and
    α_{k+1}(s, s′) = α_k(s, s′)        for s′ ≠ ŝ.

Furthermore, for any other state s′′ ≠ s, the beliefs are clearly not updated, i.e., α_{k+1}(s′′) = α_k(s′′).

Table 3.3: Parameter guide for learning transition probabilities.
Parameter(s) | Type | Description
(X_{k+1} | X_k) | observed | the transition outcome from a given state X_k.
A_k | inferred | the list of α_k(s) for all s ∈ S, where Dir(α_k(s)) represents the beliefs about p(s) at step k (the list of state transition probabilities from s to its adjacent states).
η_p | free parameter | a constant in the interval [0, 1]; the learning rate for transition probabilities.

Transition learning rate η_p

Similar to the learning rate introduced for reward associations, we introduce η_p ∈ [0, 1] to account for data inefficiency relative to the ideal observer. Denote by A_k the list of all learning parameters of the state transition probabilities, A_k = {α_k(s) : s ∈ S}. The update rule (posterior distribution) is then

    p(s) | (X_{k+1}, X_k) ~ Dir(α_{k+1}(s)),

where each element α_k(s, s′) of the lists in A_k is updated as follows:

    α_{k+1}(s, s′) = η_p + α_k(s, s′)    for s = X_k and s′ = X_{k+1},            (3.3)
    α_{k+1}(s, s′) = α_k(s, s′)          otherwise.

For an ideal observer, η_p = 1. The lower the value of η_p, the slower the learning, because the subject requires more data for similar updates in beliefs. If η_p = 0, the subject never learns from observing consecutive states. Note that the same parameter η_p is used for learning all transition probabilities.

Implementation notes

For the prior beliefs about state transitions, a uniform prior ensures that no probability value is privileged over another. Thus, for every entry α_1(s, s′) of α_1(s), we assume α_1(s, s′) = 1. At spatial step k, the entry α_k(s, s′) of α_k(s) is then

    α_k(s, s′) = η_p c_{(s,s′)}(k) + 1,                                           (3.4)

where c_{(s,s′)}(k) is the total number of observed transitions from s to s′ from step 1 to step k. By keeping track of c_{(s,s′)}(k) in a matrix, any parameter in A_k can be calculated on demand for the current state using Equation 3.4.

Chapter 4
Behavioral model part 2: the generative model

In the previous chapter, I discussed the internal representations of spatial regions and of the reward probabilities within those regions. This chapter describes a model that uses these internal representations to generate behaviour. The learning model for updating beliefs about reward probabilities and state transitions relied on a normative model of Bayesian learning. In contrast, we present a descriptive model of behaviour that does not explicitly enforce any optimal decision-making criteria. Before making normative assumptions about behaviour, it is important to have a descriptive framework for systematically assessing assumptions about behaviour.

Recall that the location, visual stimulus, licking and speed of the mouse are recorded in the experimental data (see Section 2.2). To improve readability, Table 4.1 lists the notation used to represent the behavioural data.
A spatial state transition event triggers an update of the internal representations of reward probabilities and spatial transitions. During the period between two transition events, the parameters associated with the internal representations (specified by the elements of B_k and A_k) are unchanged. Assuming that the internal representations guide behaviour, we define behavioural parameters for speed and licking rate that are derived from the parameters of the internal representations. Figure 4.1 describes the conditional dependence structure of the parameters associated with a spatial state. In this model, the internal representations are used to derive two parameters that guide the licking and speed behaviour: the target speed ν̃_k and the licking rate λ̃_k, discussed in detail in Sections 4.2 and 4.1, respectively.

Figure 4.1: Graphical model of the update of the internal representations at a given spatial step, with the associated learning parameters (green) and the associated behavioural parameters (blue). The dotted squares indicate internal representations that are not observed in the data. Variables inside circles have stochastic outcomes given their parents, and variables inside squares have deterministic outcomes given their parents. State transitions trigger the update of these variables for the new step k + 1. Note that the model satisfies the Markov property. A description of the conditional dependencies is included in Table 4.2.

Table 4.1: Behavioural and observational records for t ∈ {1, 2, . . . , N}.
Data | Type | Description
x_t | observation | the true value of the distance from the onset of the current corridor at time step t.
y_t | observation | y_t ∈ Cor = {grey, vertical, angled}, the true value of the corridor type, which determines the visual stimuli at time step t.
o_t | observation | a binary value indicating whether the reward valve opened during the time step.
v_t | behaviour | the (average) speed at time step t.
l_t | behaviour | the number of licks at time step t.

Table 4.2: Description of the update of the internal representations at a given step, using the graphical model of Figure 4.1. Variables (Var.) and their parents (Par.) are given in the first and second columns. The third column (Type) indicates whether the outcome of the variable given its parents is stochastic (Stoch.) or deterministic (Deter.). The conditional dependence of each variable on its parents is described in the last column.
Var. | Par. | Type | Update description
X_{k+1} | X_k | Stoch. | stochastic outcome of the state immediately following X_k.
B_{k+1} | B_k, R_k, X_k | Deter. | update of the reward probability distribution of the previous state using Equation 3.1.
A_{k+1} | A_k, X_{k+1}, X_k | Deter. | update of the transition probability distribution for the last transition using Equation 3.3.
r_k | B_k, X_k | Deter. | reward distribution of the current state.
γ_k(ρ) | B_k, A_k, X_k | Deter. | discounted reward probability of present and future states, given by Equation 4.6 with discount factor ρ.
ν̃_k | γ_k(ρ) | Deter. | value of the target speed in spatial step k, adjusted by the value of γ_k(ρ).
λ̃_k | r_k | Deter. | licking rate in step k, given by Equation 4.3.
R_k | λ̃_k | Stoch. | reward outcome of spatial step k.

4.1 Spatial state parameter λ̃_k: licking rate

Consider the relevance of the reward probability distribution r_k to the licking behaviour. First, it is reasonable to assume that the mouse regulates its licking rate using its perception of the expected reward probability in the current state. The expected value of the reward probability in the current state (in step k) is the mean of Beta(β_k^(1)(s), β_k^(2)(s)), which is

    μ(r_k) = β_k^(1) / (β_k^(1) + β_k^(2)).                                       (4.1)
Second, independently of the expectation of reward, the degree of uncertainty about the true probability of reward may also be relevant to behaviour (Zhao & Warren 2015), and in particular to the rate of licking in the current state. A larger variance of the reward probability may mean that the current state should be explored further by licking, in order to decrease the uncertainty about reward values. The variance of the reward probability beliefs can also be calculated from the Beta distribution:

    σ²(r_k) = β_k^(1) β_k^(2) / ( (β_k^(1) + β_k^(2))² (β_k^(1) + β_k^(2) + 1) ).  (4.2)

Let L_t be a random variable for the number of licks at time step t. We assume that licking is generated by a Poisson distribution,

    L_t ~ Pois(λ̃_k),

where, for model parameters ω_1, ω_2 and ω_3,

    λ̃_k = ω_1 μ(r_k) + ω_2 σ(r_k) + ω_3                                          (4.3)

is the licking rate at a time step spent within the current spatial step. The probability that L_t = l_t, for a number of licks l_t, is given by

    P(L_t = l_t) = λ̃_k^{l_t} e^{−λ̃_k} / l_t!                                     (4.4)

Table 4.3: Parameters relevant to the licking behaviour.
Parameter | Type | Description
λ̃_k | spatial state parameter | rate of the Poisson distribution generating the licking behaviour within a time step spent in spatial step k.
ω_1 | model parameter | weight of the expected reward probability of the current reward distribution in the calculation of λ̃_k.
ω_2 | model parameter | weight of the standard deviation of the current reward distribution in the calculation of λ̃_k.
ω_3 | model parameter | baseline licking rate in the calculation of λ̃_k.

4.2 Parameter ν̃_k: target speed within the current spatial state

We noticed that the mouse tends to speed up if it does not expect a reward in the upcoming states (see, for example, Figures 2.5 and 2.6). We model this behaviour using a discounted measure of future rewards.

Discounted future reward

The expected average reward probability m steps after the current state s can be formulated as

    Σ_{s′∈S} E[r(s′)] P(X_{k+m} = s′ | X_k = s).                                   (4.5)

The value of P(X_{k+m} | X_k) can be estimated from the transition probability matrix obtained from the expected values of the transition probabilities and the standard Markov chain transition properties (Equation 1.1) (Häggström et al. 2002). To estimate the entries of the transition probability matrix, we use the expected value of the transition probability p(s,s′) under the Dirichlet distributions with parameters in A_k:

    E[p(s,s′)] = α_k(s, s′) / Σ_{s′′∈Adj(s)} α_k(s, s′′)

is the estimated value of the p(s,s′) entry of the transition probability matrix. To conclude the calculation of Expression 4.5, note that E[r(s′)] = β_k^(1)(s′) / (β_k^(1)(s′) + β_k^(2)(s′)).
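The ingredients of Expression 4.5 can be assembled as in the sketch below, which builds the posterior-mean transition matrix and reward vector and propagates the occupancy distribution by matrix powers. The NumPy-based containers and function names are assumptions made for illustration.

import numpy as np

def expected_transition_matrix(alpha, states):
    """P[i, j] = E[p(s_i, s_j)] = alpha_k(s_i, s_j) / sum over s'' of alpha_k(s_i, s'')."""
    index = {s: i for i, s in enumerate(states)}
    P = np.zeros((len(states), len(states)))
    for s, row in alpha.items():
        total = sum(row.values())
        for s2, a in row.items():
            P[index[s], index[s2]] = a / total
    return P

def expected_rewards(B, states):
    """E[r(s)] = beta1 / (beta1 + beta2) for every state."""
    return np.array([B[s][0] / (B[s][0] + B[s][1]) for s in states])

def expected_reward_m_steps_ahead(P, r_mean, current_index, m):
    """Expression 4.5: sum over s' of E[r(s')] P(X_{k+m} = s' | X_k = s)."""
    occupancy = np.linalg.matrix_power(P, m)[current_index]
    return float(occupancy @ r_mean)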
Now, for a fixed value of ρ, define the discounted future reward γ_k(ρ) at the current step k as

    γ_k(ρ) := ( Σ_{m=0}^{∞} ρ^m Σ_{s′∈S} E[r(s′)] P(X_{k+m} = s′ | X_k) ) / Σ_{m=0}^{∞} ρ^m.   (4.6)

Note that γ_k(ρ) is a normalised sum of the discounted present and future expected reward probabilities. Similar to the value function in reinforcement learning (Sutton & Barto 2018), or to the concept of discounted cash flow in financial asset valuation (Damodaran 2012), it incorporates all future reward values by giving iteratively less weight to rewards that are further away.

When transitioning from one state to another, a lower discounted future reward γ_k(ρ) is likely to indicate that the next reward is further away. In this case, the mouse may choose to adjust its behaviour (Kleinfeld et al. 2006) by speeding up to pass the unrewarded regions more quickly. Since the discounted future reward does not change as long as the mouse remains in the same spatial state, the desired speed at the current spatial step can be modelled as a spatial state parameter. Let the target speed ν̃_k for the current state be

    ν̃_k := v_max (1 − γ_k(ρ)),                                                    (4.7)

where v_max is a model parameter that puts an upper bound on the target speed. A simple model of the speed at time step t is then

    v_t ~ N(ν̃_k, σ_ν²).                                                           (4.8)

However, physical constraints on movement do not permit an instantaneous jump in speed when the spatial state changes. An alternative model of speed that takes these physical constraints into consideration (at the cost of additional parameters) is

    v_{t+1} ~ N(E[v_{t+1}], Var[v_{t+1}]),                                         (4.9)

where

    (E[v_{t+1}], Var[v_{t+1}]) = (v_t + δ_v^+, σ_v²)   for v_t < ν̃_k − ϵ,
                                 (v_t + δ_v^-, σ_v²)   for v_t > ν̃_k + ϵ,          (4.10)
                                 (v_t, σ_v²)           otherwise, i.e. for v_t ∈ [ν̃_k − ϵ, ν̃_k + ϵ],

and where the model parameters δ_v^+ and δ_v^- are constant acceleration and deceleration terms, σ_v² is the variance of the speed outcome at the next time step, and the model parameter ϵ determines the range around the target speed within which no deterministic acceleration or deceleration is applied.

4.3 Generative model of licking and speed

For a given spatial state structure (i.e., fixed parameters V and d), there exists a function f_{V,d} : (xLoc × Cor) → S that associates each position with a state. It is then possible to determine the time steps associated with state transitions. In Section 3.1, we assumed that the states are fully observable to the subject; the subject therefore knows the value of f_{V,d} at any current time step.

Table 4.4: Parameters relevant to the speed behaviour.
Parameter | Type | Description
ρ | model parameter | discount rate of future reward (Expression 4.6).
ν̃_k | spatial state parameter | target speed (Expression 4.7).
σ_ν² | model parameter | variance of speed in the first model (Expression 4.8).
σ_v² | model parameter | variance of the speed change in the second model (Expression 4.9).
δ_v^+, δ_v^- | model parameters | acceleration and deceleration rates (second model).
ϵ | model parameter | range of purely random speed change around the target speed (second model).

Binary variable K_t: indicator of a spatial state transition event

For the current time step t, let K_{t+1} be a binary variable such that

    K_{t+1} = 0   if f_{V,d}(x_t, y_t) = f_{V,d}(x_{t+1}, y_{t+1}),
    K_{t+1} = 1   if f_{V,d}(x_t, y_t) ≠ f_{V,d}(x_{t+1}, y_{t+1}).                (4.11)

That is, K_{t+1} = 1 if (x_t, y_t) and (x_{t+1}, y_{t+1}) are not in the same state, and so a state transition has occurred.
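The speed-related quantities of Equations 4.6 to 4.11 can be sketched as follows. The truncation of the infinite sums at a finite horizon, the convention that δ_v^- is negative (so that the decrement enters additively, as written in Equation 4.10) and all names are assumptions made for this illustration.

import numpy as np

def discounted_future_reward(P, r_mean, current_index, rho, horizon=100):
    """Equation 4.6, truncated at a finite horizon (the sums converge for 0 <= rho < 1)."""
    occupancy = np.eye(P.shape[0])[current_index]
    num = den = 0.0
    for m in range(horizon + 1):
        num += rho**m * float(occupancy @ r_mean)
        den += rho**m
        occupancy = occupancy @ P        # advance P(X_{k+m} | X_k) by one step
    return num / den

def target_speed(gamma_k, v_max):
    """Equation 4.7: a lower discounted future reward gives a higher target speed."""
    return v_max * (1.0 - gamma_k)

def next_speed(v_t, v_target, delta_plus, delta_minus, sigma_v, eps, rng):
    """Equations 4.9 and 4.10: move toward the target speed with Gaussian
    variability; within +/- eps of the target no deterministic change is applied."""
    if v_t < v_target - eps:
        mean = v_t + delta_plus          # accelerate
    elif v_t > v_target + eps:
        mean = v_t + delta_minus         # decelerate (delta_minus < 0 here)
    else:
        mean = v_t
    return rng.normal(mean, sigma_v)

def transition_indicator(state_t, state_t1):
    """Equation 4.11: K_{t+1} = 1 exactly when the spatial state changes."""
    return int(state_t != state_t1)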
Note that a spatial state transition triggers an update of the beliefs about the environment (the reward probabilities within states and the state transitions). The internal representations in the graphical model of Figure 4.1 are then updated to the next spatial step, and the behavioural parameters λ̃_{k_{t+1}} and ν̃_{k_{t+1}} correspond to the new spatial step. For K_{t+1} = 0, the behavioural parameters λ̃_{k_{t+1}} and ν̃_{k_{t+1}} remain unchanged from the previous time step.

Figure 4.2 shows the graphical model of the generative model of behaviour across time steps. The model assumes that the spatial state associated with (x_t, y_t) is unambiguously determined by the subject (fully observable spatial states). Therefore, the value of K_{t+1}, which indicates a state transition, is also observed by the subject. Furthermore, K_{t+1} can be deterministically inferred from the experimental data using Equation 4.11, so it is also observed in the behavioural data. If K_{t+1} = 1, the graphical model of the update of the internal representations is used to find the new behavioural parameters (indicated by green arrows). If K_{t+1} = 0, the behavioural parameters remain unchanged from the previous step. A description of the relationships is included in Table 4.5.

Table 4.5: Description of the relationships in the generative model of behaviour in the graphical model of Figure 4.2. Variables (Var.) and their parents (Par.) are given in the first and second columns. The third column (Type) indicates whether the outcome of the variable given its parents is stochastic (Stoch.) or deterministic (Deter.). The conditional dependence of each variable on its parents is described in the last column.
Var. | Par. | Type | Update description
K_{t+1} | (x_t, y_t), (x_{t+1}, y_{t+1}) | Stoch. | transition event indicator (Expression 4.11).
ν̃_{k_{t+1}} | ν̃_{k_t}, K_{t+1} | Deter. | for K_{t+1} = 0, ν̃_{k_{t+1}} = ν̃_{k_t}; otherwise the spatial state changes and the graphical model of Figure 4.1 updates the value.
λ̃_{k_{t+1}} | λ̃_{k_t}, K_{t+1} | Deter. | for K_{t+1} = 0, λ̃_{k_{t+1}} = λ̃_{k_t}; otherwise the spatial state changes and the graphical model of Figure 4.1 updates the value.
l_t | λ̃_k | Stoch. | Poisson-distributed value with rate λ̃_k (Expression 4.3).
v_t | ν̃_k | Stoch. | speed at time step t under the first model (Expression 4.8) or the second model (Expression 4.9).

Figure 4.2: Graphical model of the generative model of behaviour. Note that the variables and relationships drawn in yellow and brown are not part of the internal model; they describe the conditional dependence of the observed values on the model variables. See Table 4.5 for a description of the relationships.

4.4 Estimation of model parameters

Below, the general framework for estimating the model parameters is discussed. For a fixed spatial model M_{V,d}, let θ be the list of model parameters,

    θ := (V, d, η_r, η_p, ω_1, ω_2, ω_3, σ_ν²)                      (using the first speed model), or
    θ := (V, d, η_r, η_p, ω_1, ω_2, ω_3, σ_v², δ_v^+, δ_v^-, ϵ)     (using the second speed model).

Given the model parameters and the observational data, the parents of v_t and l_t are deterministically set at each time point (see the graphical model of Figure 4.2). Therefore, speed and licking are conditionally independent, and the likelihood of the generative model of behaviour at time step t factorises as

    L(θ | (v_t, l_t)) = P(v_t, l_t | θ) = P(v_t | θ) P(l_t | θ) = f(v_t; μ_t(θ), σ_t(θ)) g(l_t; λ_t(θ)),

where f and g are the Gaussian probability density function and the Poisson probability mass function, respectively.
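A minimal sketch of this factorised likelihood, whose per-time-step terms are summed in the estimation below, is given here; the use of SciPy and the array-based interface are assumptions made for illustration.

import numpy as np
from scipy.stats import norm, poisson

def total_log_likelihood(speeds, licks, mus, sigmas, lams):
    """Sum over t of log f(v_t; mu_t, sigma_t) + log g(l_t; lambda_t),
    where mus, sigmas and lams are the per-time-step parameters produced by
    the graphical model for a candidate parameter vector theta."""
    speeds, licks = np.asarray(speeds), np.asarray(licks)
    return (norm.logpdf(speeds, loc=mus, scale=sigmas).sum()
            + poisson.logpmf(licks, mu=lams).sum())

Maximising this sum over θ, for example with a generic numerical optimiser for the continuous parameters and a grid over the discrete choices of V and d, corresponds to Equation 4.13 below.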
The distribution parameters μ_t(θ), σ_t(θ) and λ_t(θ) are deterministically fixed at each time point given the model parameters (see Equations 4.3, 4.8 and 4.9). The model evidence of the generative model up to time step N is then

    L(θ | {(v_t, l_t) : t = 1, . . . , N}) ∝ Π_{t=1}^{N} f(v_t; μ_t(θ), σ_t(θ)) g(l_t; λ_t(θ)),        (4.12)

and maximum likelihood estimation (MLE) can be used to estimate the fitted model parameters,

    θ* = argmax_θ Σ_{t=1}^{N} ln[ f(v_t; μ_t(θ), σ_t(θ)) g(l_t; λ_t(θ)) ].                             (4.13)

Note that for each spatial step, the graphical model is used to calculate the parameters μ_t(θ), σ_t(θ) and λ_t(θ).

Chapter 5
Discussion

The next step in the project is to complete the model validation on synthetic data. Before applying the model to real data, it is important to scrutinise the behaviour of the generative model. We plan to do so by pre-determining values for the model parameters and generating synthetic behavioural data; the generated behaviour is then used as a given data set. If the model is well behaved, the model parameters should be recoverable from the synthetic data. As different spatial state structures radically alter the learning dynamics, we will conduct the parameter recovery for the spatial model parameters particularly diligently. By considering various alternative hypotheses (different values of d and V), the model evidence (Equation 4.12) of the alternative hypotheses will be compared. For a well-behaved model, the model evidence is expected to be best for the parameters that were used to generate the data.

5.1 Limitations

While our model assumes fully observable Markov states, noisy observations of the location and the visual stimuli introduce uncertainty about the true current state of the system. Indeed, observations of the environment are often noisy, and some behavioural models take this into account (Kang et al. n.d., Kersten & Mamassian 2009). While the learning rates for the reward and transition probabilities capture some aspects of noisy observations, they are not based on normative assumptions, and alternatives should be considered in future research (Laquitaine & Gardner 2018). Fortunately, there is an extensive body of research on partially observable Markov decision processes (Monahan 1982, Kaelbling et al. 1996) that would provide a clear path for improving the current model.

An alternative to estimating the model parameters with MLE, as in Section 4.4, is maximum a posteriori (MAP) estimation (Murphy 2012, Griffiths & Yuille 2008). In contrast to MLE, MAP estimation incorporates prior beliefs about the parameters and, in its fully Bayesian form, yields a posterior distribution for each parameter that characterises the level of uncertainty about it. Since some of the model parameters are qualitatively interpretable, this may be particularly relevant. In particular, a distribution over the possible options for V, the set of discriminated visual stimuli, is highly relevant to the imaged activity of the visual cortex. A potential challenge of MAP is that the computational difficulty of the calculation may introduce implementation challenges that are hard to resolve.
Nonetheless, its estimate of the model parameters is potentially more meaningful for studying visual perception.

5.2 Implications

During the experiments, two-photon calcium imaging and optogenetics were performed to determine changes in the inputs and activity of individual excitatory and inhibitory cells within the primary visual cortex. Previously, a multivariate autoregressive linear model (MVAR) was fitted to the neuronal data (Khan et al. 2018):

    q_{t+1} = q_t + A q_t + u_t + ξ v_t,

where q_t is the vector of response levels of all n imaged neurons at time step t, A is an n × n matrix containing the fitted interaction parameters, u_t is a fitted vector for the stimulus-related input, and ξ is a fitted parameter for the contribution of the current speed v_t. The MVAR model was used to compare the activity of populations of different inhibitory and excitatory cell types. The only behavioural term included was the speed v_t, which did not make a significant contribution. An immediate application of the behavioural model presented in this report is to potentially improve the MVAR model by including parameters related to the internal representations. In particular, learned parameters that are likely to be relevant to behaviour, namely the expected reward probability μ(r_k), its variance σ²(r_k) and the discounted future reward γ_k(ρ), could improve the predictive power of the MVAR model.

If the internal representation terms from the behavioural model improve the predictive power of the MVAR model, this will give new insights into the information encoded by neurons in the primary visual cortex. Future experiments can then be designed to systematically manipulate these internal terms in order to understand the precise representations (Heilbron et al. 2020). This will help us understand how the structure of the environment changes learning dynamics and internal representations.

Bibliography

Barlow, H. B. et al. (1961), 'Possible principles underlying the transformation of sensory messages', Sensory communication 1, 217–234.
Beck, J. M., Ma, W. J., Kiani, R., Hanks, T., Churchland, A. K., Roitman, J., Shadlen, M. N., Latham, P. E. & Pouget, A. (2008), 'Probabilistic population codes for bayesian decision making', Neuron 60(6), 1142–1152.
Berkes, P., Orbán, G., Lengyel, M. & Fiser, J. (2011), 'Spontaneous cortical activity reveals hallmarks of an optimal internal model of the environment', Science 331(6013), 83–87.
Bishop, C. M. (2006), Pattern recognition and machine learning, Springer.
Chen, T.-W., Wardill, T. J., Sun, Y., Pulver, S. R., Renninger, S. L., Baohan, A., Schreiter, E. R., Kerr, R. A., Orger, M. B., Jayaraman, V. et al. (2013), 'Ultrasensitive fluorescent proteins for imaging neuronal activity', Nature 499(7458), 295–300.
Damodaran, A. (2012), Investment valuation: Tools and techniques for determining the value of any asset, Vol. 666, John Wiley & Sons.
Dombeck, D. A., Harvey, C. D., Tian, L., Looger, L. L. & Tank, D. W. (2010), 'Functional imaging of hippocampal place cells at cellular resolution during virtual navigation', Nature neuroscience 13(11), 1433–1440.
Fiser, J., Berkes, P., Orbán, G. & Lengyel, M. (2010), 'Statistically optimal perception and learning: from behavior to neural representations', Trends in cognitive sciences 14(3), 119–130.
Fishell, G. & Kepecs, A. (2019), 'Interneuron types as attractors and controllers', Annual review of neuroscience 43.
Geisler, W. S. (2003), 'Ideal observer analysis', The visual neurosciences 10(7), 12–12.
Geisler, W. S. (2011), 'Contributions of ideal observer theory to vision research', Vision research 51(7), 771–781.
Griffiths, T. & Yuille, A. (2008), 'A primer on probabilistic inference', The probabilistic mind: Prospects for Bayesian cognitive science, pp. 33–57.
Häggström, O. et al. (2002), Finite Markov chains and algorithmic applications, Vol. 52, Cambridge University Press.
Harvey, C. D., Collman, F., Dombeck, D. A. & Tank, D. W. (2009), 'Intracellular dynamics of hippocampal place cells during virtual navigation', Nature 461(7266), 941–946.
Heeger, D. J. (2017), 'Theory of cortical function', Proceedings of the National Academy of Sciences 114(8), 1773–1782.
Heilbron, M., Richter, D., Ekman, M., Hagoort, P. & De Lange, F. P. (2020), 'Word contexts enhance the neural representation of individual letters in early visual cortex', Nature communications 11(1), 1–11.
Kaelbling, L. P., Littman, M. L. & Moore, A. W. (1996), 'Reinforcement learning: A survey', Journal of artificial intelligence research 4, 237–285.
Kang, Y. H., Mahr, J., Nagy, M., Andrási, K., Csibra, G. & Lengyel, M. (n.d.), 'Eye movements reflect causal inference during episodic memory retrieval'.
Kepecs, A. & Fishell, G. (2014), 'Interneuron cell types are fit to function', Nature 505(7483), 318–326.
Kersten, D. & Mamassian, P. (2009), 'Ideal observer theory', Encyclopedia of neuroscience 5, 89–95.
Khan, A. G., Poort, J., Chadwick, A., Blot, A., Sahani, M., Mrsic-Flogel, T. D. & Hofer, S. B. (2018), 'Distinct learning-induced changes in stimulus selectivity and interactions of gabaergic interneuron classes in visual cortex', Nature neuroscience 21(6), 851–859.
Kleinfeld, D., Ahissar, E. & Diamond, M. E. (2006), 'Active sensation: insights from the rodent vibrissa sensorimotor system', Current opinion in neurobiology 16(4), 435–444.
Kriegeskorte, N. & Douglas, P. K. (2018), 'Cognitive computational neuroscience', Nature neuroscience 21(9), 1148–1160.
Laquitaine, S. & Gardner, J. L. (2018), 'A switching observer for human perceptual estimation', Neuron 97(2), 462–474.
Maloney, L. T. & Mamassian, P. (2009), 'Bayesian decision theory as a model of human visual perception: Testing bayesian transfer', Visual neuroscience 26(1), 147–155.
Monahan, G. E. (1982), 'State of the art—a survey of partially observable markov decision processes: theory, models, and algorithms', Management science 28(1), 1–16.
Murphy, K. P. (2012), Machine learning: a probabilistic perspective, MIT Press.
Orbán, G., Fiser, J., Aslin, R. N. & Lengyel, M. (2008), 'Bayesian learning of visual chunks by human observers', Proceedings of the National Academy of Sciences 105(7), 2745–2750.
Poort, J., Khan, A. G., Pachitariu, M., Nemri, A., Orsolic, I., Krupic, J., Bauza, M., Sahani, M., Keller, G. B., Mrsic-Flogel, T. D. et al. (2015), 'Learning enhances sensory and multiple non-sensory representations in primary visual cortex', Neuron 86(6), 1478–1490.
Pouget, A., Dayan, P. & Zemel, R. S. (2003), 'Inference and computation with population codes', Annual review of neuroscience 26(1), 381–410.
Saleem, A. B., Diamanti, E. M., Fournier, J., Harris, K. D. & Carandini, M. (2018), 'Coherent encoding of subjective spatial position in visual cortex and hippocampus', Nature 562(7725), 124–127.
Sutton, R. S. & Barto, A. G. (2018), Reinforcement learning: An introduction, MIT Press.
Yan, Y., Rasch, M. J., Chen, M., Xiang, X., Huang, M., Wu, S. & Li, W. (2014), 'Perceptual training continuously refines neuronal population codes in primary visual cortex', Nature neuroscience 17(10), 1380–1387.
Yang, T. & Maunsell, J. H. (2004), 'The effect of perceptual learning on neuronal responses in monkey visual area V4', Journal of Neuroscience 24(7), 1617–1626.
Yap, E.-L. & Greenberg, M. E. (2018), 'Activity-regulated transcription: bridging the gap between neural activity and behavior', Neuron 100(2), 330–348.
Zhao, M. & Warren, W. H. (2015), 'How you get there from here: Interaction of visual landmarks and path integration in human navigation', Psychological science 26(6), 915–924.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='1 Learning reward probability within a state .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' 19 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='2 Learning state transitions .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' 21 4 Behavioral model part 2: the generative model 25 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='1 Spatial state parameter ˜λk: licking rate .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' 26 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='2 Parameter ˜νk: target speed within the current spatial state .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' 29 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='3 Generative model of licking and speed .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' 30 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='4 Estimation of model parameters .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' 33 5 Discussion 34 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='1 Limitations .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' 34 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='2 Implications .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' .' 
Bibliography

Chapter 1
Introduction

Learning modifies neural representations of behaviourally relevant information. Changes in response selectivity to behaviourally relevant stimuli have been observed in many studies across different species (Yang & Maunsell 2004, Yan et al. 2014, Poort et al. 2015), and there is growing evidence that different cell types, classified by their molecular and cellular properties (Kepecs & Fishell 2014), play specific roles in learning (Khan et al. 2018, Fishell & Kepecs 2019). However, the nature of these changes and how they relate to sensory coding are not well understood (Yap & Greenberg 2018).

Probabilistic models of behavioural learning are an important approach for linking changes in neural representations to internal representations of the environment and to decision-making (Fiser et al. 2010, Berkes et al. 2011, Heeger 2017). Given the non-deterministic nature of events in the real world, human and animal learning must involve at least some internal representation of the uncertainties in the environment (Barlow et al. 1961). There is an extensive body of research on how the nervous system represents uncertainty about the environment (Pouget et al. 2003, Beck et al. 2008, Fiser et al. 2010, Kriegeskorte & Douglas 2018).
Bayesian learning theory provides a normative framework for learning that represents uncertainty over probabilistic outcomes (Bishop 2006). In particular, ideal observer analysis uses Bayesian learning theory to achieve optimal learning performance in a given task (Geisler 2003, 2011). Learning can be conceptualised as the incorporation of sensory information to update and improve performance on a given task; the ideal observer performs at the theoretical limits of information processing when updating its beliefs. However, it is important to note that optimality in this context refers to the optimal incorporation of information, which is not equivalent to achieving the optimal solution on every trial. While the nervous system may or may not have representations similar to those of an ideal observer, ideal observer analysis provides a systematic framework for formulating hypotheses about internal representations and learning dynamics (Maloney & Mamassian 2009, Orbán et al. 2008).

In this thesis, we describe a Bayesian learning model using the framework of ideal observer learning. Our goal is to develop a model of the internal representations of reward and space that are used for learning and adjusting behaviour in the visual discrimination task. This model will allow us, in future work, to relate neuronal activity measurements (Poort et al. 2015, Khan et al. 2018) to the internal representations that guide behaviour.
We continue this chapter with a brief overview of the basic mathematical ideas used to develop the model. Then, in Chapter 2, we explain the experimental setup and describe the behavioural data. A space of models (for the structure of Markov models) is introduced in Chapter 3, which defines the internal representations of reward and state transitions. Then, a Bayesian model of learning reward probabilities and state transitions is described, using the ideal observer framework. In Chapter 4, we introduce a generative Markov model that uses the internal representations to generate behaviour. We also discuss the use of maximum likelihood estimation to estimate the model parameters. Finally, in Chapter 5, we discuss the potential applications and limitations of the model and set out a path for the continuation of the research.

1.1 Mathematical preliminaries

In this section, I briefly introduce the concepts that provide the mathematical foundation of the behavioural model.

Markov chain model

A system has the Markov property if predictions about future events require only knowledge of the system's present state. In other words, given the present state of the system, future events are conditionally independent of past events. A Markov chain is a stochastic model of a sequence of events with the Markov property. Let S = {s_1, s_2, ..., s_r} be the set of states of a Markov chain.
The process starts in one of these states and moves sequentially from one state to another; each move is called a step. Let X_n denote the state at step n. We denote by p_ij = P(X_{n+1} = s_j | X_n = s_i) the transition probability of visiting state s_j after visiting s_i. Note that, by the Markov property, given X_n, the next state X_{n+1} is conditionally independent of the past states. A transition from s_i to s_j can be represented as a directed edge (s_i, s_j) with a corresponding transition probability p_ij, and the transition probabilities of the outgoing edges of each state must sum to 1. Figure 1.1 illustrates a Markov chain with four states and its transition probabilities.

[Figure 1.1: a four-state Markov chain (states s_1–s_4) with its transition probabilities.]

Let T = [p_ij] be the transition probability matrix of the Markov chain, and let u be the probability vector representing the starting distribution (i.e., X_k ∼ u). Then the probability that the chain is in state s_i after m steps is the i-th entry of the vector u^(m) := u T^m. That is,

P(X_{k+m} = s_i \mid X_k \sim u) = u^{(m)}_i, \qquad u^{(m)} = u\,T^m. \qquad (1.1)
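To make equation (1.1) concrete, here is a minimal sketch (not part of the thesis) that propagates a starting distribution through a hypothetical four-state transition matrix; the matrix entries are illustrative assumptions only.

```python
import numpy as np

# Hypothetical transition matrix T = [p_ij] for a four-state chain;
# each row sums to 1 (outgoing transition probabilities of one state).
T = np.array([
    [0.0, 0.7, 0.3, 0.0],
    [0.0, 0.0, 0.6, 0.4],
    [0.5, 0.0, 0.0, 0.5],
    [1.0, 0.0, 0.0, 0.0],
])

u = np.array([1.0, 0.0, 0.0, 0.0])  # starting distribution: begin in s_1

# Distribution after m steps: u^(m) = u T^m, as in equation (1.1)
m = 5
u_m = u @ np.linalg.matrix_power(T, m)
print(u_m, u_m.sum())  # entries give P(X_{k+m} = s_i); they sum to 1
```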
Bayesian learning

The probability of an event A is denoted by P(A). Consider another event B and its corresponding probability P(B). The conditional probability P(A | B) is the probability of A given B. Bayes' theorem states that

P(A \mid B) = \frac{P(B \mid A)\,P(A)}{P(B)}.

Consider a system that generates data and a space of possible models for describing the behaviour of the system. The probability distribution over the space of models, P(Model), represents the prior knowledge about the system. Suppose that a set of data, Data, is observed from the system. Then P(Data | Model) is called the likelihood, and P(Data) is called the model evidence or marginal likelihood. The posterior distribution over the models, P(Model | Data), represents our beliefs about the system after observing the data.
Bayes' rule provides a principled way of updating our beliefs about the system after observing data. Formally,

P(\text{Model} \mid \text{Data}) = \frac{P(\text{Data} \mid \text{Model})\,P(\text{Model})}{P(\text{Data})}. \qquad (1.2)
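As a toy illustration of equation (1.2) (not drawn from the thesis), the snippet below normalises likelihood times prior over a small hypothetical set of candidate models; the model names and numbers are made up for illustration.

```python
import numpy as np

# Hypothetical discrete model space with a uniform prior.
models = ["model_A", "model_B", "model_C"]
prior = np.array([1/3, 1/3, 1/3])

# Assumed likelihoods P(Data | Model) of the same observed data under each model.
likelihood = np.array([0.02, 0.10, 0.04])

evidence = np.sum(likelihood * prior)        # P(Data), the marginal likelihood
posterior = likelihood * prior / evidence    # P(Model | Data), equation (1.2)

for name, p in zip(models, posterior):
    print(f"P({name} | Data) = {p:.3f}")
```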
Dirichlet distribution: learning categorical probability values

Consider a random variable which can take on K possible categories. The categorical distribution is a discrete probability distribution for this random variable in which the probability of each category is specified separately. The categorical distribution generalises the Bernoulli distribution to a discrete variable with more than two outcomes, such as the outcomes of a six-sided die. It is also a special case of the multinomial distribution in which the number of trials is one. If the probabilities of the outcomes of a categorical distribution are unknown, we can use Bayesian learning to update prior probability distributions over those probability values.

The Dirichlet distribution is a conjugate prior for the multinomial (and categorical) distribution, meaning that starting with a Dirichlet prior and a multinomial likelihood, the resulting posterior is also a Dirichlet distribution. The probability density function of the Dirichlet distribution Dir_K(α) with K categories is

f(p \mid \alpha) = \frac{1}{B(\alpha)} \prod_{i=1}^{K} p_i^{\alpha^{(i)}-1}, \qquad (1.3)

where α = (α^(1), ..., α^(K)) is the vector of parameters and

B(\alpha) = \frac{\prod_{i=1}^{K} \Gamma(\alpha^{(i)})}{\Gamma\!\left(\sum_{i=1}^{K} \alpha^{(i)}\right)}, \qquad \Gamma(n) = \int_0^{\infty} x^{n-1} e^{-x}\,dx \ \text{for } n > 0.

For positive integer values, Γ(n) = (n − 1)!.

To learn the probabilities of a categorical distribution, given a prior distribution Dir_K(α) over the probability vector p = (p_1, ..., p_K) and data c = (c_1, ..., c_K) giving the number of observations in each category, the posterior distribution is

p \mid c \sim \mathrm{Dir}_K(c + \alpha). \qquad (1.4)

Finally, the Beta distribution is the special case of the Dirichlet distribution in which the outcomes are binary (true or false). To distinguish this special case, we may use the notation Beta(β^(1), β^(2)) ≡ Dir_2(α) with α = (β^(1), β^(2)).
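As a minimal sketch of the conjugate update in equation (1.4) (not taken from the thesis), the code below starts from a symmetric Dirichlet prior, adds hypothetical category counts, and reports the posterior parameters and posterior mean; the prior and counts are illustrative assumptions.

```python
import numpy as np

K = 3
alpha_prior = np.ones(K)            # symmetric Dirichlet prior Dir_K(1, ..., 1)
counts = np.array([12, 3, 5])       # hypothetical numbers of observations per category

alpha_post = alpha_prior + counts   # posterior is Dir_K(c + alpha), equation (1.4)

posterior_mean = alpha_post / alpha_post.sum()   # E[p_i] = alpha_i / sum(alpha)
print("posterior parameters:", alpha_post)
print("posterior mean of p:", posterior_mean)

# Draw a few samples from the posterior to visualise the remaining uncertainty.
samples = np.random.default_rng(0).dirichlet(alpha_post, size=5)
print(samples)
```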
Chapter 2
The experiment

In this chapter, I describe the experimental setup of Khan et al. (2018) and Poort et al. (2015), for which we have developed a behavioural model in the later chapters. A summary of previous findings and a description of the behavioural data, accompanied by figures, are also included.

2.1 Experimental setup

The experimental setup involves placing the mouse on a cylindrical treadmill with its head fixed to enable imaging of neural activity. The mouse can move forward (and backward). A screen in front of the mouse displays visual feedback coupled to its movement, simulating movement of the subject through an environment. Because it allows control of the layout of the space and of the visual stimuli while permitting imaging, the VR setup has been used extensively in recent years to study the visual cortex and hippocampus in mice (Harvey et al. 2009, Dombeck et al. 2010, Khan et al. 2018, Poort et al. 2015, Saleem et al. 2018). Figure 2.1 illustrates the VR setup.

Figure 2.1: Movement in a corridor simulated in the VR environment.

Specifics of the corridor space and reward administration

We specifically consider the experimental setup described in Khan et al. (2018) and Poort et al. (2015).
In these two studies, the activity of populations of neurons in V1 was measured with two-photon calcium imaging (Chen et al. 2013) during a visual discrimination task in a virtual reality (VR) environment. Head-fixed mice ran through a simulated corridor where different types of visual stimuli were displayed on the walls. Three types of wall pattern characterise the different corridors. In the grey corridor, a short stretch of circle patterns is followed by grey walls for a random distance, before the wall pattern abruptly changes to one of the grating corridors. The grating corridors display either vertical gratings (illustrated in Figure 2.1) or angled gratings for a fixed length (60 VR length units), before returning to the grey corridor. An illustration of the corridor space is given in Figure 2.2.

Figure 2.2: Illustration of the corridor space.

A milk dispenser was placed in front of the mouse to administer rewards. Mice received a reward for licking the dispenser in a reward zone starting halfway along the vertical grating corridor and extending for around 10 VR length units. If the mouse licked the dispenser in the reward zone, the reward valve opened and a drop of soy milk appeared at the dispenser. No punishment was given for licking in the grey or angled-grating corridors.
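To make the reward contingency concrete, here is a minimal sketch of a reward-zone check, assuming the approximate geometry described above (a 60-unit grating corridor with a roughly 10-unit reward zone starting halfway along the vertical-grating corridor); the names and exact boundaries are illustrative, not taken from the published task code.

```python
GRATING_LENGTH = 60.0                                          # fixed grating-corridor length (VR length units)
REWARD_ZONE = (GRATING_LENGTH / 2, GRATING_LENGTH / 2 + 10.0)  # approx. 30-40 units

def lick_is_rewarded(x, corridor):
    """Return True if a lick at distance x from the corridor onset
    falls in the reward zone of the vertical-grating corridor."""
    if corridor != "vertical":
        return False            # no reward (and no punishment) elsewhere
    lo, hi = REWARD_ZONE
    return lo <= x <= hi
```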
All mice learnt to discriminate the two stimuli, starting at chance performance (behavioural d′ close to zero) and reaching the threshold criterion of d′ > 2.0 within 5–9 days.

Summary of previous findings

The motivation behind developing a behavioural model is to take advantage of the behavioural data in future analyses of experiments similar to Khan et al. (2018). A summary of the results in Khan et al. (2018) is as follows. After learning the visual discrimination task, neurons showed increased stimulus selectivity for the angled and vertical gratings. Interestingly, this effect depended on cell type. In particular, stimulus selectivity was compared across populations of pyramidal cells (PYR) and parvalbumin- (PV), somatostatin- (SOM), and vasoactive intestinal peptide-expressing (VIP) inhibitory interneurons in layer 2/3 (L2/3) of the primary visual cortex (V1). Selectivity increased for PYR and PV cells. PV neurons became as selective as the PYR cells and showed changes in functional interactions, particularly with PYR cells. On the other hand, SOM neurons became decorrelated from the network, and PYR–SOM coupling before learning predicted selectivity increases in individual PYR cells. While SOM inhibition seemed to gate changes in selectivity, PV cells provided strong stimulus-selective inhibition after learning.
A multivariate autoregressive linear model (MVAR model) was fitted to the activity of the neurons and further supported the results of the statistical analysis. However, the MVAR model arguably neglects potentially important information in the behavioural data: even though speed is taken into account, its contribution to the behaviour of the MVAR model is negligible. Accordingly, one of the primary motivations for the behavioural model proposed in this report is the potential to improve the MVAR model. This is discussed in more detail in Chapter 5.

2.2 Behavioural data and observations

Behavioural data were collected during the experiment. The distance travelled from the onset of the current corridor and the corridor type (determined by the wall patterns) are continuously recorded. The observed spatial location and visual stimulus at each time are marked by a pair (x, y) ∈ (xLoc × Cor), where x is the distance travelled from the onset of the current corridor pattern and y is the corridor pattern. The set xLoc = [0, max(x)] is the interval from 0 to the maximal corridor length max(x), and Cor = {grey, vertical, angled} is the set of corridor types. The speed of the subject at each time is also recorded, and the data include lists of licking times and valve-opening times (indicating reward administration). For the generative behavioural model in Chapter 4, we discretise the data into time intervals of ∆τ seconds, each identified by an index t ∈ {1, 2, . . . , N}.
The value of ∆τ determines the time resolution of the behavioural data. Since the imaging data of Khan et al. (2018) are acquired at 1/8 s intervals, time bins finer than 1/8 s are not useful. A larger ∆τ (coarser time resolution) may be desirable because it decreases the computational cost of the analysis, but the accompanying loss of temporal detail must be weighed. Unless explicitly stated otherwise, we assume ∆τ = 1/8 s for the data analysis. Table 2.1 describes the notation used for the data. Note that some of the records are behavioural, while others specify values observed by the subject.

Table 2.1: Behavioural and observational records for t ∈ {1, 2, . . . , N}.

Data | Type        | Description
xt   | Observation | The true distance from the onset of the current corridor at time step t.
yt   | Observation | yt ∈ Cor = {grey, vertical, angled}, the true corridor type, which determines the visual stimuli at time step t.
ot   | Observation | A binary value indicating whether the reward valve opened during time step t.
vt   | Behaviour   | Average speed at time step t.
lt   | Behaviour   | Number of licks at time step t.
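As a sketch of how the raw recordings could be binned into the records of Table 2.1, assuming ∆τ = 1/8 s and hypothetical 1-D NumPy arrays of sample times, positions, corridor types, speeds, lick times, and valve-opening times; none of these names come from the original data files.

```python
import numpy as np

DT = 1.0 / 8.0  # time-bin width in seconds, matching the imaging interval

def discretise(t_samples, x_samples, y_samples, v_samples, lick_times, valve_times):
    """Bin continuously recorded behaviour into time steps of width DT.

    Inputs are 1-D NumPy arrays (t_samples sorted, starting at 0).
    Returns per-bin arrays: x_t (position), y_t (corridor type),
    o_t (valve opened in bin), v_t (mean speed), l_t (lick count).
    """
    n_bins = int(np.ceil(t_samples[-1] / DT))
    edges = np.arange(n_bins + 1) * DT

    # Take the first sample at or after each bin start for position and corridor type.
    idx = np.searchsorted(t_samples, edges[:-1])
    x_t = x_samples[idx]
    y_t = y_samples[idx]

    # Average speed of the samples falling inside each bin.
    which_bin = np.clip(np.digitize(t_samples, edges) - 1, 0, n_bins - 1)
    v_t = np.array([v_samples[which_bin == b].mean() if np.any(which_bin == b) else 0.0
                    for b in range(n_bins)])

    # Event counts / indicators per bin.
    l_t, _ = np.histogram(lick_times, bins=edges)
    o_t = np.histogram(valve_times, bins=edges)[0] > 0
    return x_t, y_t, o_t, v_t, l_t
```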
Instances of data visualisation

The figures below are examples of behavioural data visualisations from the experimental data. Figures 2.3 and 2.4 (Poort et al. 2015) illustrate the licking behaviour at different positions in the different corridors, and Figures 2.5 (Poort et al. 2015) and 2.6 (Khan et al. 2018) give a colour map of speed at different positions in the different corridors. For all figures, the horizontal axis represents the position relative to the onset of the grating corridor (for the grey corridor, this is obtained by shifting xt by the length of the grey corridor), and the vertical axis is the trial index; higher trial numbers are later. The black or red labels are data labels associated with the experimental sessions.

The following observations about the licking behaviour have influenced the parameter definitions and the assumptions about the animal's prior beliefs in Chapter 3. These observations are consistent across all subjects.

Reward association prior: The mice do not know the reward associations before experiencing the reward. However, the mice know that moving forward and licking the dispenser may lead to a reward.
Initially, licking is frequent, as the animal explores the space and discovers reward associations. A uniformly random prior for the reward probability may therefore be appropriate.

Change of visual discrimination: The behaviour of the mice in the grating and grey areas starts to diverge immediately, whereas behaviour in the angled and vertical grating corridors seems similar at first; differences in licking behaviour appear only after the reward has been encountered in the vertical grating corridor. The dissociation of the reward from the angled grating is realised substantially later than the dissociation of the reward from the grey area. It seems that at different stages of learning, the set of visually discriminated stimuli is different.

Location is also taken into account: As learning progresses, licking concentrates close to the reward zone. The mice appear to associate a spatial region, characterised by both visual stimuli and spatial position, with the reward area.

The following observations about speed have influenced our generative model of speed in Section 4.3. These observations are consistent across all subjects.

Reward association influences speed: The graphs suggest that dissociation of reward from upcoming regions is associated with higher speed, while anticipation of reward in upcoming regions is associated with a reduction of speed.
Evidence for change in the internal model: While speed behaviour in the grey corridor diverges from that in the grating corridors quickly, the divergence of speed behaviour between the angled and vertical gratings happens at a later point. This suggests that the mice initially associate the grating areas with the reward, and then learn to differentiate between the grating areas in order to dissociate the angled grating from the reward.

Change of visual discrimination: Similar to the licking behaviour, the speed behaviour initially seems to discriminate between the angled and vertical gratings only after the reward has been encountered in the vertical grating corridor. This again suggests that the mice initially associate the grating areas with reward, and then learn to discriminate between the vertical and angled grating areas.

Figure 2.3: Lick locations for M27. See the figure description below (Figure 2.4).

Figure 2.4: Lick locations for M31 in all trials. The horizontal axis represents the location in a corridor, with 0 set at the onset of a grating corridor; negative values are in the grey corridors and positive values are in the grating corridors. Licking locations are marked by coloured points: red dots represent licks within 1 length unit before a valve opening, and yellow marks licks after the opening of the reward valve in a grating corridor.
All other lick locations are marked in black. The trial number on the vertical axis shows the sequential order of the trials in each plot. The right plot shows all trials, where each trial passes through one grey corridor followed by a grating corridor; the middle and left plots show a closer look at the vertical and angled grating corridors, respectively. The red labels mark the experimental sessions.
Figure 2.5: Speed vs. location for M31. See the figure description below (Figure 2.6).

Figure 2.6: Speed and licks vs. location for M70. The horizontal axis represents the location in the corridor, with 0 set at the onset of a grating corridor; negative values are in the grey corridors and positive values are in the grating corridors. The trial number on the vertical axis shows the sequential order of the trials in each plot. The right plot shows all trials, where each trial passes through one grey corridor followed by a grating corridor; the middle and left plots show a closer look at the vertical and angled grating corridors. The colour at each location of each trial represents the speed of the animal at that point according to the colour scale (0–50 VR length units per second): warmer colours represent higher speeds and cooler colours lower speeds. Note that for Figure 2.5 the speed is averaged over 5-unit intervals due to virtual memory limits. The white points show the lick locations for M70, and the small black stars indicate valve-opening locations during a trial. The black labels are data labels associated with the experimental sessions.
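The 5-unit position averaging mentioned for Figure 2.5 could be reproduced along these lines; this is a sketch with hypothetical per-sample position and speed arrays, and only the 5-unit bin width is taken from the caption.

```python
import numpy as np

def speed_by_position(x, v, bin_width=5.0):
    """Average speed v within consecutive position bins of the given width."""
    edges = np.arange(np.floor(x.min()), np.ceil(x.max()) + bin_width, bin_width)
    counts, _ = np.histogram(x, bins=edges)
    sums, _ = np.histogram(x, bins=edges, weights=v)
    with np.errstate(invalid="ignore"):
        mean_v = sums / counts          # NaN where a bin contains no samples
    centres = edges[:-1] + bin_width / 2
    return centres, mean_v
```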
Chapter 3: Behavioural model part 1: internal representations

The behavioural model presented here provides a framework for inferring an internal model that can predict the animal's behaviour at a given time. Before getting into the specifics, consider a broad perspective on inferring a model that generates the current behaviour by incorporating past experiences. Figure 3.1 is a graphical model of the big-picture relation between the history of the animal's observations H, the internal model M that incorporates experience into internal representations, and the observed behaviour B. This chapter discusses the relationship between the history of observations and the behaviourally relevant representations in the internal model (H → M in the graphical model of Figure 3.1). I introduce a space of models within which a range of hypotheses about the internal model can be systematically examined. The internal representations of reward and space are then used in the next chapter to construct a generative model of behaviour (M → B in the graphical model of Figure 3.1). Finally, using a systematic approach, an internal model is inferred that best describes the data (H and B).
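Read as a Bayesian network, the dependency structure H → M → B implies the factorisation P(H, M, B) = P(H) P(M | H) P(B | M), so that scoring a candidate internal model against the observed data amounts, up to normalisation, to evaluating P(M | H, B) ∝ P(M | H) P(B | M); this restatement follows from the graphical model rather than being written out explicitly here.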
Figure 3.1: Relation between the history of experimental observations H, the internal model M, and the behaviour B. H and B are observed in the experimental data, but the internal model M is unobserved.

By exploring and experiencing the environment, the brain updates its beliefs about the environment (i.e., it learns) using its internal representations. In this learning model, the normative framework of Bayesian ideal observer analysis (Geisler 2003, 2011) is used to learn behaviourally relevant internal representations. These include the probability of reward in different regions of the VR corridor, and expectations about upcoming spatial regions when moving forward (the subject can only move forward due to the experimental setup). The model of spatial states in Section 3.1 describes how the space (the VR corridor) is divided into states corresponding to spatial segments, where the representation of reward probability within a state depends only on the information (the history of reward outcomes) obtained in that state. The structure of these states is a Markov chain. The space of models in Section 3.2 prescribes a range of Markov chain structures of spatial states within which a model is selected. For the states of a given model, the dynamics of learning reward associations and state transitions are considered within the normative framework of the Bayesian ideal observer model in Section 3.3.
3.1 Structure of spatial states

Animals' observation of visual stimuli and spatial positioning is an observation of the current (x, y) ∈ xLoc × Cor. Observations about the reward association at the current location (x, y) may be relevant to the reward association at some other locations. It is therefore necessary to define spatial regions where reward observations are relevant to the entire region but explicitly irrelevant to other regions. To formalise this concept, the objective of this section is to associate segments of space with states such that the information about reward association is relevant to the current state and to no other state. A reasonable way to define such states is to group areas that are spatially close by, visually similar, or both.

Defining states associated with spatial segments

Taking into account both spatial proximity and visual similarity, consider sectioning xLoc into a finite set of mutually exclusive spatial segments, each identified by a fixed y and an interval I_x of x values. We illustrate an example of spatial segmentation in Figure 3.2. Denote by S a set of states and associate each segment with only one state (note that multiple segments may be associated with the same state). Then we say that the mouse is in state s if its position (x, y) is inside a segment that is associated with s. We associate all positions in all corridors with exactly one state through the function f : (xLoc × Cor) → S.

[1] The subject can only move forward due to the experimental setup.
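To make the mapping f : (xLoc × Cor) → S concrete, here is a minimal sketch in Python. It assumes the finest visual discrimination (every corridor type kept separate) and a hypothetical segment length; the corridor labels, the default d, and the (corridor, index) state naming are illustrative assumptions, not the parameterisation used in the thesis.

```python
# Hypothetical illustration of f : (xLoc x Cor) -> S.
# A state is identified by (corridor label, segment index), where the
# segment index is the interval of width d that contains x.

CORRIDORS = ("grey", "vertical", "angled")   # assumed labels for Cor

def f(x, corridor, d=20.0):
    """Map a position (x, corridor) to a state (corridor, j), with
    x in [(j - 1) * d, j * d).  Assumes x >= 0 and a valid corridor."""
    if corridor not in CORRIDORS:
        raise ValueError(f"unknown corridor type: {corridor}")
    j = int(x // d) + 1          # 1-based segment index, as in s_{i,j}
    return (corridor, j)

# Example: a mouse 35 length units into a vertical-grating corridor
# with d = 20 is in state ("vertical", 2), i.e. x in [20, 40).
print(f(35.0, "vertical"))
```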
The mouse may map locations onto states in multiple ways. By considering various ways to map between locations and states, we can infer the mapping that best matches the behavioural data (see Section 4.4).

Spatial state transition event and structural properties

Let X_k be the random variable describing the k-th visited spatial state, where a spatial state transition event (i.e., a transition to the next spatial step) happens when the subject crosses the initial point of a segment associated with a state [2]. Given the current position, the future positions do not depend on the history of visited positions, so given X_k, state X_{k+1} is conditionally independent of X_n for n < k. It follows that the state structure as defined above satisfies the Markov property. We assume that the spatial states are fully observable. In other words, given a state structure, we assume that the subject always knows which state is the current state. Observations of the animal may be noisy and inaccurate, so assuming fully observable states is a simplification that may be contended with in a more sophisticated future model. However, states are associated with intervals of space rather than precise points in space, and they already incorporate some approximation about the spatial awareness of the subject. We assume that the mouse learns two things from the visual stimuli and licking in state s. First, it learns the reward association in that state.
Second, it learns the transition from that state to other states. Let r(s) be the probability that licking in state s leads to reward in state s. Also, denote by p(s, s') = P(X_{k+1} = s' | X_k = s) the transition probability of visiting any state s' after s. These parameters are initially unknown to the mouse and should be learned. In Section 3.3, I discuss a semi-normative model of learning for these parameters using the ideal observer framework.

Figure 3.2: An example of dividing the corridor space into mutually exclusive spatial segments. Each segment is then associated with exactly one state.

[2] Note that the time spent in each state is not fixed in this Markov model.

It is worth noting that the state transitions of the Markov chain are sparse. To understand the sparsity of state transitions, first note that x is a positive real value, which ranges from 0 to the maximal length of a corridor with the same patterns, and y is a discrete value with three possible entries. From the onset of a corridor until the onset of the next corridor, the spatial location is a continuous function of time. Within the period between two consecutive onsets, if a state transition happens, it can only be to the state associated with the next interval of x, with the same y.
Moreover, when passing the onset of the next corridor, there is a discrete change in the value of y, and x = 0 at the onset of the new corridor. This event can only be a state transition to the start of a new corridor (a state that starts at x = 0), so there are at most three such possible transitions. It follows that the structure of states is a sparse Markov chain.

3.2 Space of models for spatial states

To define a space of models M, we use two parameters for identifying a model in the model space: one for the set of discriminated patterns (V), and one for the length of segments (d).

Spatial model parameter V: set of discriminated visual stimuli

Let V be the set of visual stimuli that are discriminated in the spatial state model. The set of possible choices for V is {V1, V2, V3}, described below.

V1 = {u := undifferentiated}, where the grey and grating corridors are not discriminated.

V2 = {g := grey, va := angled or vertical grating}, where the grey corridor is discriminated from the grating corridors, but the angled and vertical grating corridors are not discriminated from each other.

V3 = {g := grey, v := vertical, a := angled}, where the grey, vertical grating, and angled grating corridors are all discriminated.

While the set Cor contains the types of visual stimuli on the corridors, the set V refers to the subjective visual discrimination (or classification) between corridors by the mouse. Also note that the choices for the set V implicitly contain a mapping from Cor to V.
Spatial model parameter d: length of states

Denote by d a value in the interval (0, max(x)] for the length of spatial segments. The value d uniquely defines a sequence of intervals of x values. For example, the sequence of intervals associated with d = 30 is {[0, 30), [30, 60), ...}. Then state s_{ij} is associated with the j-th interval of x, which is [(j − 1)d, jd), and i ∈ V identifies the visual stimuli. For example, for V = {g, p} and d = 30, the state s_{p,2} refers to the interval x ∈ [30, 60) for both the vertical and angled grating corridors.

Model space

Now it is possible to introduce a Markov model M_{V,d} ∈ M with the set of states S that are associated with the spatial intervals induced by V and d.

Figure 3.3: Nine instances of Markov chain models M_{V,d} for the choices of V and selected instances of d (rows d = max(x), d = 60, d = 20; columns V1 = {u}, V2 = {g, va}, V3 = {g, v, a}). For d = max(x), there is only one state per element of V, and a state transition event only occurs when the corridor type changes. The length of the angled and vertical grating corridors is exactly 60 (VR length units) in the experiment, so for d = 60 and d = 20 there are exactly 1 and 3 states, respectively, associated with the relevant element of V. Note that the figure illustrates only selected instances of the model space M.
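The model space can also be enumerated programmatically. The sketch below builds the state set of each M_{V,d} for the three choices of V and a few candidate values of d; the 60-unit corridor length follows the figure caption, while the explicit mapping from corridor types to elements of V and the candidate d values are illustrative assumptions.

```python
# Hypothetical enumeration of a few models M_{V,d} from the model space M.
# Each state is a pair (v, j): an element v of V and a 1-based segment index j.
import math

X_MAX = 60.0   # length of the grating corridors in VR length units (Figure 3.3)

# Assumed mapping from corridor type (Cor) to the discriminated set V.
V_CHOICES = {
    "V1": {"grey": "u", "vertical": "u", "angled": "u"},
    "V2": {"grey": "g", "vertical": "va", "angled": "va"},
    "V3": {"grey": "g", "vertical": "v", "angled": "a"},
}

def states_of_model(v_map, d):
    """Set of states S induced by a choice of V (via v_map) and segment length d."""
    n_segments = math.ceil(X_MAX / d)
    return {(v, j) for v in set(v_map.values()) for j in range(1, n_segments + 1)}

for name, v_map in V_CHOICES.items():
    for d in (X_MAX, 30.0, 20.0):          # illustrative candidate values of d
        print(name, d, len(states_of_model(v_map, d)))
```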
Table 3.1: Parameters for the model of spatial states.

Parameter | Type | Description
V | Spatial model parameter | Set of discriminated visual stimuli on the corridors in the model M_{V,d}; possible options are V1 = {u}, V2 = {g, p}, and V3 = {g, v, a}.
d | Spatial model parameter | A constant length in (0, max(x)] for the length of the spatial segments of model M_{V,d}.

Since the length of a corridor is bounded by max(x), the model M_{V,d} is a finite-state Markov model. For example, M_{V1,max(x)} and M_{V3,max(x)} have exactly one and three states, respectively. Figure 3.3 illustrates the states of Markov chain models M_{V,d} for example cases of V and d. Parameters V and d are free parameters that will be set during model selection, which is further discussed in Section 4.4. The fit for parameter V, selected from V1, V2, or V3, is determined by which stimuli the animal discriminates. The true value for d is the length of spatial segments for which information about reward associations and state transitions in the current segment is reasonably independent of the segments associated with other states. For the sake of simplicity, it is assumed that d is a fixed value and that it is the same across different visual stimuli. However, relaxing this assumption is possible by having more free parameters, for example by introducing a free parameter of distance for each element of V.
For example, suppose V = V3. Then, instead of a single free parameter d, we could use three parameters D = {d_g, d_a, d_v}, which contains one free parameter of distance for every element of V. In the initial implementation of the model, one parameter d is considered. In summary, the parameters V and d of a model M_{V,d} determine the structure of the states in the Markov chain, where for each state the learning dynamics for reward association and state transitions depend only on the observations in that state. The learning dynamics are discussed in the next section.

3.3 Bayesian learning model

As first noted in Section 3.1, in any state s the subject uses sensory information to learn r(s), the probability that licking in s leads to the administration of reward in s, or the reward probability of s for short. Furthermore, the state transition probability p(s, s'), which is the probability of visiting state s' after visiting s, is also unknown to the subject and is learned. Here, we use the ideal observer framework (Geisler 2003) to develop a semi-normative model for learning both reward associations and state transitions. In this section, the learning dynamics are discussed for a given model M ∈ M. Therefore, the states S and their associated spatial intervals are unambiguous.
3.3.1 Learning reward probability within a state

Recall that reward is given to the subject immediately after the subject licks the dispenser in the reward zone (see Section 2.1 for details of the experimental setup). The reward is a fixed amount of milk administered via the dispenser. We noticed that even in trained animals, licking started before the reward zone (see the example mice in Figures 2.3 and 2.4). This suggests that the mouse associates an extended region with the reward delivery, which starts before the reward zone set by the experimenters.

Reward outcome R_k of the current spatial step k

If the mouse licks the dispenser in state s, it collects some information about the unknown parameter r(s). If the subject does not lick the dispenser, it obtains no information about r(s). Let the random variable R_k = (R_k^{(T)}, R_k^{(F)}) be the reward outcome of spatial step k, where R_k^{(T)} counts the number of positive outcomes and R_k^{(F)} counts the number of negative outcomes in spatial step k. As a consequence of the experimental setup, the amount of reward and the frequency of licking in the experiment do not provide any additional information about a reward region. Furthermore, spatial states are defined to be regions where licking at different points within the region does not provide additional information about the reward. Therefore, each visit to a state provides only three possible reward outcomes: R_k = (1, 0) if the subject licks the dispenser in spatial step k and reward becomes available in spatial step k; R_k = (0, 1) if the subject licks the dispenser in spatial step k but no reward follows in spatial step k; and R_k = (0, 0) if the subject does not lick the dispenser in spatial step k.
Normative model for updating internal reward representations (Bayesian)

Let us first discuss how an ideal observer updates its prior beliefs about r(s) after visiting state s in spatial step k. The ideal observer provides a theoretical upper limit of performance, given the collected data. It is therefore a normative framework for updating the beliefs about reward association. Let the prior beliefs about r(s) right before visiting spatial step k be a Beta distribution Beta(β_k^{(1)}(s), β_k^{(2)}(s)) over the interval [0, 1]. The reward outcome R_k = (R_k^{(T)}, R_k^{(F)}) is the data that is newly collected about the reward. By Equation 1.4, the posterior is

r(s) | R_k ~ Beta(R_k^{(T)} + β_k^{(1)}(s), R_k^{(F)} + β_k^{(2)}(s)).

Reward learning rate η_r

The above is a theoretical bound on learning from observations in state s, assuming a prior Beta distribution over [0, 1] for the reward probability r(s). Some mice learn faster than others, and all of them will perform no better than the ideal observer model above. To allow for individual differences and different learning rates, we introduce a model parameter η_r ∈ [0, 1], which dials the amount of data required for the same amount of learning as an ideal observer. The update rule (i.e., the posterior) is

r(s) | R_k ~ Beta(η_r R_k^{(T)} + β_k^{(1)}(s), η_r R_k^{(F)} + β_k^{(2)}(s)).
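A minimal sketch of this update rule, assuming the Beta parameters are stored per state in a dictionary; the variable names, the dictionary layout, and the uniform Beta(1, 1) default prior are illustrative choices, not the thesis implementation.

```python
# Hypothetical sketch of the reward-belief update (Beta-Bernoulli with
# learning rate eta_r).  beta[s] holds (beta1, beta2) for state s.

def update_reward_beliefs(beta, s, reward_outcome, eta_r=1.0):
    """Update beliefs about r(s) after observing R_k = (R_T, R_F) in state s.

    eta_r = 1 reproduces the ideal-observer posterior; eta_r < 1 means the
    subject needs more data for the same change in beliefs."""
    r_true, r_false = reward_outcome          # (1, 0), (0, 1) or (0, 0)
    b1, b2 = beta.get(s, (1.0, 1.0))          # uniform Beta(1, 1) prior
    beta[s] = (b1 + eta_r * r_true, b2 + eta_r * r_false)
    return beta[s]

beliefs = {}
update_reward_beliefs(beliefs, ("vertical", 2), (1, 0), eta_r=0.5)  # lick, rewarded
update_reward_beliefs(beliefs, ("vertical", 2), (0, 1), eta_r=0.5)  # lick, no reward
b1, b2 = beliefs[("vertical", 2)]
print(b1 / (b1 + b2))   # posterior mean of r(s)
```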
To keep track of the learning parameters, let B_k = {β_k(s) := (β_k^{(1)}(s), β_k^{(2)}(s)) : s ∈ S} be the Beta parameters for the beliefs about the reward probabilities of all states in spatial step k. Note that after visiting state s in spatial step k,

β_{k+1}(s) = η_r R_k + β_k(s)   for s = X_k, and    (3.1)
β_{k+1}(s') = β_k(s')           for s' ≠ X_k.

Note that η_r is defined to have the same value across all states. If η_r = 1, the mouse performs as well as the normative ideal observer, and if η_r = 0, the mouse never learns reward associations. For values between 0 and 1, the mouse requires extra data points to update its beliefs to the same extent as the ideal observer model. The model parameter η_r can be interpreted as the data efficiency of learning. It could be used to compare individual learning differences among subjects. Furthermore, it would be interesting to assess whether differences in η_r across individuals are predictive of comparative learning rates on other learning tasks. It also provides a qualitative way to assess the model: for example, if the value is unreasonably high, it may indicate a flaw in the state structure or an incorrect choice of prior.

Table 3.2: Guide for variables (Var) and parameters (Par) relevant to internal reward representations.
Var/Par | Type | Description
R_k | observed | A binary pair representing the reward outcome of step k: (1, 0) lick and reward within step k; (0, 1) lick but no reward within step k; (0, 0) no lick within step k.
B_k | inferred | List of (β_k^{(1)}(s), β_k^{(2)}(s)) for all s ∈ S, where Beta(β_k^{(1)}(s), β_k^{(2)}(s)) represents the beliefs about r(s) at spatial step k.
η_r | model parameter | A constant in the interval [0, 1] for the learning rate of reward association.

Implementation notes

To simplify the model implementation, we can derive the posterior distribution at step k by merely keeping a record of the total counts of positive and negative reward outcomes in state s. In particular, at step k, for state s, let c_k(s) = (c_k^{(T)}(s), c_k^{(F)}(s)) be the total count of positive and negative outcomes in state s from step 1 up to the start of step k. That is,

c_k(s) = Σ_{n=1}^{k-1} 1[X_n = s] R_n.

For the current spatial step k, a list of numbers can store the values of c_k(s). Assuming a uniform prior at the start of the experiment, i.e., β_1^{(1)}(s) = β_1^{(2)}(s) = 1, the prior probability distribution of r(s) at step k is

r(s) ~ Beta(η_r c_k^{(T)}(s) + 1, η_r c_k^{(F)}(s) + 1),   for which   β_k(s) = η_r c_k(s) + 1.    (3.2)

3.3.2 Learning state transitions

The learning dynamics for the state transitions p(s, s') are defined similarly to the reward associations. Let E be the set of (directed) transition edges, and let Adj(s) = {s' : (s, s') ∈ E} be the set of states adjacent to s, so that for X_k = s the outcome of X_{k+1} is in Adj(s).
Therefore, the transition probabilities from s, P(X_{k+1} | X_k = s), form a distribution of outcomes over Adj(s). Assuming fixed transition probabilities, P(X_{k+1} | X_k = s) can be represented by a list of probabilities p(s) := {p(s, s') : s' ∈ Adj(s)}. Note that if the subject is not familiar with the space, the true distribution is unknown, and the subject learns about these probabilities through experience.

Normative model for updating internal transition representations (Bayesian)

Every time the subject leaves state s and the next step is observed, one observation is made about the outcome of X_{k+1} given X_k = s. Because the outcome is a multinomial random variable, where the possible outcomes are the states in Adj(s), we use a Dirichlet prior distribution to represent uncertainties about p(s). Specifically, at spatial step k, p(s) ~ Dir(α_k(s)), where the list of parameters α_k(s) contains an element corresponding to each possible outcome. In particular, α_k(s) = {α_k(s, s') : s' ∈ Adj(s)}. Suppose X_k = s and consider an ideal observer whose prior beliefs about p(s) at spatial step k are described by Dir(α_k(s)). Also suppose the ideal observer visits the next state and makes the observation X_{k+1} = s̆. Then by Equation 1.4, the posterior distribution is p(s) | (X_{k+1} = s̆, X_k = s) ~ Dir(α_{k+1}(s)), where each element α_{k+1}(s, s') of α_{k+1}(s) is updated as follows:

α_{k+1}(s, s') = 1 + α_k(s, s')   for s' = s̆, and
α_{k+1}(s, s') = α_k(s, s')       for s' ≠ s̆.

Furthermore, for any other state s'' ≠ s, the beliefs are not updated, i.e., α_{k+1}(s'') = α_k(s'').
Table 3.3: Parameter guide for learning transition probabilities.

Parameter(s) | Type | Description
(X_{k+1} | X_k) | observed | Transition outcome from a given state X_k.
A_k | inferred | List of α_k(s) for all s ∈ S, where Dir(α_k(s)) represents the beliefs about p(s) at step k (the list of state transition probabilities from s to adjacent states).
η_p | free parameter | A constant in the interval [0, 1] for the learning rate of transition probabilities.

Transition learning rate η_p

Similar to the learning rate introduced for learning the reward association, we introduce η_p ∈ [0, 1] to account for data inefficiency compared to the ideal observer. Denote by A_k the list of all learning parameters of the state transition probabilities, A_k = {α_k(s) : s ∈ S}. Now, the update rule (posterior distribution) is p(s) | (X_{k+1}, X_k) ~ Dir(α_{k+1}(s)), where each element α_k(s, s') of the lists of parameters in A_k is updated as follows:

α_{k+1}(s, s') = η_p + α_k(s, s')   for s = X_k and s' = X_{k+1},    (3.3)
α_{k+1}(s, s') = α_k(s, s')          otherwise.

For an ideal observer, η_p = 1. The lower the value of η_p, the slower the learning becomes, because the subject requires more data for similar updates in beliefs. If η_p = 0, the subject never learns from observing consecutive states.
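A matching sketch for the transition update, keeping Dirichlet pseudo-counts per (state, successor) pair. As with the reward sketch above, the data structures, the example adjacency, and the state labels are illustrative assumptions rather than the thesis implementation.

```python
# Hypothetical sketch of learning p(s) with a Dirichlet prior and
# learning rate eta_p.  alpha[(s, s_next)] stores the pseudo-count
# alpha_k(s, s'); unseen pairs default to the uniform prior value 1.

from collections import defaultdict

def update_transition_beliefs(alpha, s, s_next, eta_p=1.0):
    """Update beliefs about p(s) after observing the transition s -> s_next."""
    alpha[(s, s_next)] += eta_p
    return alpha[(s, s_next)]

def transition_posterior_mean(alpha, s, adjacency):
    """Posterior mean of p(s, s') over the states adjacent to s."""
    counts = {s_next: alpha[(s, s_next)] for s_next in adjacency[s]}
    total = sum(counts.values())
    return {s_next: c / total for s_next, c in counts.items()}

alpha = defaultdict(lambda: 1.0)              # uniform Dirichlet prior
adjacency = {("grey", 1): [("grey", 2), ("vertical", 1), ("angled", 1)]}  # illustrative
update_transition_beliefs(alpha, ("grey", 1), ("vertical", 1), eta_p=0.8)
print(transition_posterior_mean(alpha, ("grey", 1), adjacency))
```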
Note that the same parameter η_p is used for learning all transition probabilities.

Implementation notes

For the prior beliefs about state transitions, a uniform prior ensures that the prior does not privilege any probability value over another. Then, for any entry α_1(s, s') of α_1(s), we assume that α_1(s, s') = 1. So, at spatial step k, for entry α_k(s, s') of α_k(s),

α_k(s, s') = η_p c_{(s,s')}(k) + 1,    (3.4)

where c_{(s,s')}(k) is the total number of observed transitions from s to s' from step 1 to step k. By keeping track of c_{(s,s')}(k) in a matrix, any parameter in A_k can be calculated on demand using Equation 3.4 for the current state.

Chapter 4
Behavioral model part 2: the generative model

In the previous chapter, I discussed the internal representations of spatial regions and of the reward probabilities within those regions. This chapter describes a model that utilizes these internal representations to generate behaviour. The learning model for updating beliefs about reward probabilities and state transitions utilized a normative model of Bayesian learning. In contrast, we present a descriptive model of behaviour that does not explicitly enforce any optimal decision-making criteria. Before making normative assumptions about behaviour, it is important to have a descriptive framework for systematically assessing assumptions about behaviour. Recall that the location, visual stimulus, licking, and speed of the mouse are recorded in the experimental data (see Chapter 2.2).
To improve readability, Table 4.1 includes the notation used to represent the behavioural data. A spatial state transition event triggers an update of the internal representations of reward probability and spatial transitions. During the period between two transition events, the parameters associated with the internal representations (specified by the elements of B_k and A_k) are unchanged. Assuming that the internal representations are guiding the behaviour, we define behavioural parameters for speed and licking rate that are derived from the internal representations' parameters. Figure 4.1 describes the conditional dependence structure of the parameters associated with a spatial state. In this model, the internal representations are used to derive two parameters that guide the licking and speed behaviour.

Table 4.1: Behavioural and observational records for t ∈ {1, 2, ..., N}.

Data | Type | Description
x_t | Observation | x_t is the true value of the distance from the onset of the current corridor at time step t.
y_t | Observation | y_t ∈ Cor = {grey, vertical, angled} is the true value of the corridor type, which determines the visual stimuli at time step t.
o_t | Observation | o_t is a binary value indicating whether the reward valve has opened during the time step.
v_t | Behaviour | Speed (average) at time step t.
l_t | Behaviour | Number of licks at time step t.

These parameters are the target speed ν̃_k and the licking rate λ̃_k, and they are discussed in detail in Section 4.2 and Section 4.1, respectively.

Table 4.2: Description of updating the internal representations of a given step using the graphical model of Figure 4.1. Variables (Var.) and their parents (Par(.)) are included in the first and second columns, respectively. The third column (Type) indicates whether the outcome of the variable given its parents is stochastic (Stoch.) or deterministic (Deter.). The conditional dependence of each variable on its parents is described in the last column.
Var. | Par(.) | Type | Update description
X_{k+1} | X_k | Stoch. | Stochastic outcome of the state immediately following X_k.
B_{k+1} | B_k, R_k, X_k | Deter. | Update of the reward probability distribution of the previous state using Equation 3.1.
A_{k+1} | A_k, X_{k+1}, X_k | Deter. | Update of the transition probability distribution for the last transition using Equation 3.3.
r_k | B_k, X_k | Deter. | Reward distribution of the current state.
γ_k(ρ) | B_k, A_k, X_k | Deter. | Discounted reward probability of present and future states given by Equation 4.6, with discount factor ρ.
ν̃_k | γ_k(ρ) | Deter. | Value of the target speed in spatial step k, adjusted by the value of γ_k(ρ).
λ̃_k | r_k | Deter. | Licking rate in step k, given by Equation 4.3.
R_k | λ̃_k | Stoch. | Reward outcome of spatial step k.

4.1 Spatial state parameter λ̃_k: licking rate

Consider the relevance of the reward probability distribution for r_k to the licking behaviour.
First, it is reasonable to consider the mouse regulating its licking rate using its perception of the expected reward probability in the current state. The expected value of the reward probability in the current state (in step k) is the expected value of Beta(β^(1)_k(s), β^(2)_k(s)), which is

\mu(r_k) = \frac{\beta^{(1)}_k}{\beta^{(1)}_k + \beta^{(2)}_k}.    (4.1)

Figure 4.1: Graphical model of updating internal representations at a given spatial step, the associated learning parameters (green), and the associated behavioural parameters (blue). The dotted squares indicate internal representations that are not observed in the data. Variables inside circles have stochastic outcomes given their parents, and variables inside squares have deterministic outcomes given their parents. State transitions trigger updating these variables for the new step k + 1. Note that the model satisfies the Markov property. A description of the conditional dependencies is included in Table 4.2.

Second, independently of the expectation of reward, the degree of uncertainty about the true probability of reward may also be relevant to behaviour (Zhao & Warren 2015), and in particular to the rate of licking in the current state.
More variance in the reward probability may mean that the current state should be explored further by licking, in order to decrease the uncertainty about reward values. The variance of the reward probability belief can also be calculated from the Beta distribution:

\sigma^2(r_k) = \frac{\beta^{(1)}_k \beta^{(2)}_k}{(\beta^{(1)}_k + \beta^{(2)}_k)^2 (\beta^{(1)}_k + \beta^{(2)}_k + 1)}.    (4.2)

Let L_t be a random variable for the number of licks at time step t. We assume that the licking rate is generated by a Poisson distribution, L_t ∼ Pois(˜λ_k), where, for model parameters ω_1, ω_2 and ω_3,

\tilde{\lambda}_k = \omega_1 \mu(r_k) + \omega_2 \sigma(r_k) + \omega_3    (4.3)

is the licking rate at a time step spent within the current spatial step. The probability that L_t = l_t, for a number of licks l_t, is given by

P(L_t = l_t) = \frac{\tilde{\lambda}_k^{\,l_t} e^{-\tilde{\lambda}_k}}{l_t!}.    (4.4)

Table 4.3: Parameters relevant to the licking behaviour.
| Parameter | Type | Description |
| ˜λ_k | Spatial state parameter | Rate of the Poisson distribution generating the licking behaviour within a time step spent in spatial step k. |
| ω_1 | Model parameter | Weight of the expected reward probability of the current reward distribution in calculating the spatial state parameter ˜λ_k. |
| ω_2 | Model parameter | Weight of the standard deviation of the current reward distribution in calculating the spatial state parameter ˜λ_k. |
| ω_3 | Model parameter | Base licking rate in calculating ˜λ_k. |
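As a concrete illustration, the following Python sketch computes the licking-rate parameter ˜λ_k from the Beta parameters of the current state's reward belief and samples a per-time-step lick count, following Equations 4.1 to 4.4. The function name and all numerical values (the Beta parameters and the weights ω_1, ω_2, ω_3) are arbitrary placeholders, not fitted quantities from this report.

```python
import numpy as np

def licking_rate(beta1, beta2, w1, w2, w3):
    """Licking-rate parameter lambda_k from the Beta(beta1, beta2) reward belief
    of the current spatial state (Equations 4.1-4.3)."""
    mean = beta1 / (beta1 + beta2)                                        # mu(r_k), Eq. 4.1
    var = (beta1 * beta2) / ((beta1 + beta2) ** 2 * (beta1 + beta2 + 1))  # sigma^2(r_k), Eq. 4.2
    return w1 * mean + w2 * np.sqrt(var) + w3                             # lambda_k, Eq. 4.3

rng = np.random.default_rng(0)
lam = licking_rate(beta1=3.0, beta2=2.0, w1=5.0, w2=2.0, w3=0.5)  # placeholder values
licks = rng.poisson(lam)                                          # L_t ~ Pois(lambda_k), Eq. 4.4
print(lam, licks)
```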
4.2 Parameter ˜ν_k: target speed within the current spatial state

We noticed that the mouse tends to speed up if it does not expect a reward in upcoming states (for example, see Figures 2.5 and 2.6). We model this behaviour using a discounted measure of future rewards.

Discounted future reward

The expected average reward probability m steps after the current state s can be formulated as

\sum_{s' \in S} E(r[s'])\, P(X_{k+m} = s' \mid X_k = s).    (4.5)

The value of P(X_{k+m} | X_k) can be estimated from the transition probability matrix obtained from the expected values of the transition probabilities and standard Markov chain transition properties (Equation 1.1) (Häggström et al. 2002). To estimate the entries of the transition probability matrix, we use the expected value of the transition probability p_(s,s'), given by the parameters of the Dirichlet distributions for transition probabilities in A_k:

E[p_{(s,s')}] = \frac{\alpha_k(s, s')}{\sum_{s'' \in \mathrm{Adj}(s)} \alpha_k(s, s'')}

is the estimated value for the (s, s') entry of the transition probability matrix. To conclude the calculation of Expression 4.5, note that E(r[s']) = \beta^{(1)}_k(s') / \bigl(\beta^{(1)}_k(s') + \beta^{(2)}_k(s')\bigr).
Now, let us define the discounted future reward γ_k(ρ), for a fixed value of ρ, in the current step k to be

\gamma_k(\rho) := \frac{\sum_{m=0}^{\infty} \rho^m \sum_{s' \in S} E[r(s')]\, P(X_{k+m} = s' \mid X_k = s)}{\sum_{m=0}^{\infty} \rho^m}.    (4.6)

Note that γ_k(ρ) is a normalised sum of discounted present and future expected reward probability values. Similar to the value function in reinforcement learning (Sutton & Barto 2018), or to the concept of discounted cash flow in financial asset valuation (Damodaran 2012), it incorporates all future reward values by iteratively giving less weight to rewards that are further away. When transitioning from one state to another, a lower discounted future reward γ_k(ρ) is likely to indicate that the next reward is further away. In this case, the mouse may choose to adjust its behaviour (Kleinfeld et al. 2006) by speeding up to pass the unrewarded regions more quickly. Since the discounted value of future reward does not change as long as the mouse remains in the same spatial state, the desired speed at the current spatial step can be modelled as a spatial state parameter. Let the target speed ˜ν_k for the current state be

\tilde{\nu}_k := v_{\max}\,\bigl(1 - \gamma_k(\rho)\bigr),    (4.7)

where v_max is a model parameter that puts an upper bound on the target speed. A simple model of speed for time step t is then

v_t \sim \mathcal{N}(\tilde{\nu}_k, \sigma^2_{\nu}).    (4.8)
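To make the construction of γ_k(ρ) and the target speed concrete, the sketch below builds the estimated transition matrix from the Dirichlet parameters, propagates it forward, and forms the normalised discounted sum of Equation 4.6 truncated at a finite horizon (the infinite sum is an assumption I approximate here). All numerical values, the function name, and the three-state example are illustrative placeholders, not quantities taken from the experiment.

```python
import numpy as np

def discounted_future_reward(alpha, beta1, beta2, state, rho, horizon=50):
    """gamma_k(rho) of Eq. 4.6, truncated at `horizon` steps.
    alpha: (S, S) Dirichlet parameters for transitions (near-zero for non-adjacent
    states, so row sums stand in for the sum over Adj(s)); beta1, beta2: (S,) Beta
    parameters of the per-state reward beliefs."""
    P = alpha / alpha.sum(axis=1, keepdims=True)   # E[p_(s,s')] from Dirichlet means
    r = beta1 / (beta1 + beta2)                    # E(r[s']) for every state s'
    occupancy = np.zeros(P.shape[0])
    occupancy[state] = 1.0                         # P(X_k = s' | X_k = s)
    num, den = 0.0, 0.0
    for m in range(horizon):
        num += rho ** m * occupancy @ r            # rho^m * expected reward m steps ahead
        den += rho ** m
        occupancy = occupancy @ P                  # P(X_{k+m+1} = s' | X_k = s)
    return num / den

# Placeholder 3-state example
alpha = np.array([[1., 4., 1.], [1., 1., 4.], [4., 1., 1.]])
beta1 = np.array([1., 1., 8.])
beta2 = np.array([9., 9., 2.])
gamma = discounted_future_reward(alpha, beta1, beta2, state=0, rho=0.8)
v_target = 30.0 * (1.0 - gamma)                    # tilde{nu}_k = v_max (1 - gamma_k(rho)), Eq. 4.7
print(gamma, v_target)
```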
However, physical constraints on movement do not permit an instant jump in speed when the spatial state changes. An alternative model of speed that takes these physical constraints into consideration (by adding more parameters) is

v_{t+1} \sim \mathcal{N}(E[v_{t+1}], \mathrm{Var}[v_{t+1}]),    (4.9)

where

(E[v_{t+1}], \mathrm{Var}[v_{t+1}]) =
\begin{cases}
(v_t + \delta^{+}_v,\ \sigma^2_v) & \text{for } v_t < \tilde{\nu}_k - \epsilon,\\
(v_t + \delta^{-}_v,\ \sigma^2_v) & \text{for } v_t > \tilde{\nu}_k + \epsilon,\\
(v_t,\ \sigma^2_v) & \text{otherwise, i.e. for } v_t \in [\tilde{\nu}_k - \epsilon,\ \tilde{\nu}_k + \epsilon].
\end{cases}    (4.10)

Here the model parameters δ⁺_v and δ⁻_v are constant acceleration and deceleration values, and σ²_v is the variance of the speed outcome in the next time step. Furthermore, the model parameter ϵ determines the range around the target speed within which no deterministic acceleration or deceleration is enforced.
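A minimal sketch of this second speed model (Equations 4.9 and 4.10): at each time step the expected speed moves towards the target speed by a fixed acceleration or deceleration, with Gaussian noise, unless it is already within ϵ of the target. Function name and parameter values are placeholders.

```python
import numpy as np

def next_speed(v_t, v_target, delta_acc, delta_dec, eps, sigma_v, rng):
    """One draw of v_{t+1} under the second speed model (Eqs. 4.9-4.10)."""
    if v_t < v_target - eps:
        mean = v_t + delta_acc      # accelerate towards the target
    elif v_t > v_target + eps:
        mean = v_t + delta_dec      # decelerate (delta_dec assumed negative)
    else:
        mean = v_t                  # within the epsilon band: random drift only
    return rng.normal(mean, sigma_v)

rng = np.random.default_rng(1)
v = 5.0
for _ in range(5):                  # placeholder trajectory towards a target of 20 cm/s
    v = next_speed(v, v_target=20.0, delta_acc=2.0, delta_dec=-2.0,
                   eps=1.0, sigma_v=0.5, rng=rng)
    print(round(v, 2))
```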
4.3 Generative model of licking and speed

Table 4.4: Parameters relevant to the speed behavior.
| Parameter | Type | Description |
| ρ | Model parameter | Discount rate of future reward (Expression 4.6). |
| ˜ν_k | Spatial state parameter | Target speed (Expression 4.7). |
| σ²_ν | Model parameter | Variance of speed in the first model (Expression 4.8). |
| σ²_v | Model parameter | Variance of the speed change in the second model (Expression 4.9). |
| δ⁺_v, δ⁻_v | Model parameter | Acceleration and deceleration rates (second model). |
| ϵ | Model parameter | Range around the target speed within which only random speed change occurs (second model). |

For a given spatial state structure (fixing the parameters V and d), there exists a function f_{V,d} : (xLoc × Cor) → S that associates each position with a state. It is then possible to determine the time steps associated with state transitions. In Chapter 3.1, we assumed that the states are fully observable to the subject. Therefore, the subject knows the value of f_{V,d} at any current time step.

Binary variable K_t: indicator of a spatial state transition event

For the current time step t, let K_t be a binary variable such that

K_{t+1} =
\begin{cases}
0, & \text{for } f_{V,d}(x_t, y_t) = f_{V,d}(x_{t+1}, y_{t+1}),\\
1, & \text{for } f_{V,d}(x_t, y_t) \neq f_{V,d}(x_{t+1}, y_{t+1}).
\end{cases}    (4.11)

That is to say, K_t = 1 if (x_t, y_t) and (x_{t+1}, y_{t+1}) are not in the same state, and so a state transition has occurred. Note that a spatial state transition triggers an update of the beliefs about the environment (reward probability within states and state transitions). The internal representations in the graphical model of Figure 4.1 are then updated to the next spatial step, and the behavioural parameters ˜λ_{k_{t+1}} and ˜ν_{k_{t+1}} correspond to the new spatial step. For K_t = 0, the behavioural parameters ˜λ_{k_{t+1}} and ˜ν_{k_{t+1}} remain unchanged from the previous time step.
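Expressed directly in code, the transition indicator of Equation 4.11 simply compares the state labels of consecutive position observations; the `f_Vd` used below is a stand-in for whatever state-assignment function the fixed spatial model defines, and the binning rule is purely illustrative.

```python
def transition_indicator(f_Vd, pos_t, pos_t1):
    """K_{t+1} of Eq. 4.11: 1 if consecutive positions fall in different spatial states."""
    return int(f_Vd(*pos_t) != f_Vd(*pos_t1))

# Placeholder f_{V,d}: bin the x-location into 10-unit spatial steps, ignore corridor type
f_Vd = lambda x, y: int(x // 10)
print(transition_indicator(f_Vd, (18.0, 'grey'), (21.0, 'grey')))  # -> 1 (state change)
```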
Figure 4.2 is the graphical model of the generative model of behaviour within time steps. The model assumes that the spatial state associated with (x_t, y_t) is unambiguously determined by the subject (fully observable spatial states). Therefore, the value of K_{t+1}, which indicates a state transition, is also observed by the subject. Furthermore, K_{t+1} can be deterministically inferred from the experimental data using Equation 4.11. Hence, it is also observed in the behavioural data. If K_{t+1} = 1, then the graphical model of updating internal representations is used to find the new behavioural parameters (indicated by green arrows). If K_{t+1} = 0, the behavioural parameters remain unchanged from the previous step. A description of the relationships is included in Table 4.5.

Table 4.5: Description of the relationships in the generative model of behaviour in the graphical model of Figure 4.2. Variables (Var.) and their parents (Par(.)) are listed in the first and second columns respectively. The third column (Type) indicates whether the outcome of the variable given its parents is stochastic (Stoch.) or deterministic (Deter.). The conditional dependence of the variable on its parents is described in the last column.
| Var. | Par(.) | Type | Update description |
| K_t | (x_t, y_t), (x_{t+1}, y_{t+1}) | Stoch. | Transition event indicator (Expression 4.11). |
| ˜ν_{k_{t+1}} | ˜ν_{k_t}, K_t | Deter. | For K_t = 0, ˜ν_{k_{t+1}} = ˜ν_{k_t}. Otherwise the spatial state changes and the graphical model of Figure 4.1 updates the value. |
| ˜λ_{k_{t+1}} | ˜λ_{k_t}, K_t | Deter. | For K_t = 0, ˜λ_{k_{t+1}} = ˜λ_{k_t}. Otherwise the spatial state changes and the graphical model of Figure 4.1 updates the value. |
| l_t | ˜λ_k | Stoch. | Poisson-distributed value with rate ˜λ_k (Expression 4.3). |
| v_t | ˜ν_k | Stoch. | Speed at time step t under the first model (Expression 4.8) or the second model (Expression 4.9). |
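Putting the pieces of Table 4.5 together, the following schematic loop generates licking and speed observations time step by time step: when a spatial state transition occurs (K_{t+1} = 1) the behavioural parameters are refreshed, otherwise they carry over. The per-state parameter values, the state-assignment function, the time-step length and the use of the first speed model are all placeholder assumptions, and the belief updates of Figure 4.1 are deliberately replaced by a fixed lookup table.

```python
import numpy as np

rng = np.random.default_rng(2)
f_Vd = lambda x: int(x // 10)              # placeholder state-assignment function f_{V,d}
lam_per_state = {0: 0.2, 1: 3.0, 2: 0.5}   # placeholder tilde{lambda}_k per spatial step
nu_per_state = {0: 25.0, 1: 8.0, 2: 20.0}  # placeholder tilde{nu}_k per spatial step

x = 0.0
lam, nu = lam_per_state[0], nu_per_state[0]
for t in range(30):
    licks = rng.poisson(lam)               # l_t ~ Pois(lambda_k), Eq. 4.3
    v = rng.normal(nu, 1.0)                # v_t under the first speed model, Eq. 4.8
    x_next = x + v * 0.1                   # advance position over one 0.1 s time step
    K = int(f_Vd(x_next) != f_Vd(x))       # K_{t+1}, Eq. 4.11
    if K:                                  # state transition: refresh behavioural parameters
        k = f_Vd(x_next) % 3
        lam, nu = lam_per_state[k], nu_per_state[k]
    x = x_next
```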
Figure 4.2: Graphical model of the generative model of behaviour. Note that the variables and relationships drawn in yellow and brown are not part of the internal model; they describe the conditional dependence of the observed values on the model variables. See Table 4.5 for a description of the relationships.

4.4 Estimation of model parameters

Below, the general framework for estimating the model parameters is discussed. For a fixed spatial model of space M_{V,d}, let θ be the list of model parameters,

\theta := (V, d, \eta_r, \eta_p, \omega_1, \omega_2, \omega_3, \sigma^2_{\nu})

(using the first speed model), or

\theta := (V, d, \eta_r, \eta_p, \omega_1, \omega_2, \omega_3, \sigma^2_{v}, \delta^{+}_v, \delta^{-}_v, \epsilon)

(using the second speed model). Given the model parameters and the observational data, the parents of v_t and l_t are deterministically set at each time point (see the graphical model of Figure 4.2). Therefore, speed and licking are conditionally independent, and the model likelihood of the generative model of behaviour at time step t is

L\bigl(\theta \mid (v_t, l_t)\bigr) = P(v_t, l_t \mid \theta) = P(v_t \mid \theta)\, P(l_t \mid \theta) = f\bigl(v_t;\ \mu_t(\theta), \sigma_t(\theta)\bigr)\, g\bigl(l_t;\ \lambda_t(\theta)\bigr),

where f and g are the probability density function of the Gaussian distribution and the probability mass function of the Poisson distribution, respectively.
Note that their distribution parameters are deterministically fixed at each time point given the model parameters (see Equations 4.3, 4.8 and 4.9). The model evidence for the generative model up to time step N is then

L\bigl(\theta \mid \{(v_t, l_t) : t = 1 \ldots N\}\bigr) \propto \prod_{t=1}^{N} f\bigl(v_t;\ \mu_t(\theta), \sigma_t(\theta)\bigr)\, g\bigl(l_t;\ \lambda_t(\theta)\bigr),    (4.12)

and we can use maximum likelihood estimation (MLE) to estimate the fitted model parameters:

\theta^{*} = \operatorname*{argmax}_{\theta} \sum_{t=1}^{N} \ln\Bigl( f\bigl(v_t;\ \mu_t(\theta), \sigma_t(\theta)\bigr)\, g\bigl(l_t;\ \lambda_t(\theta)\bigr) \Bigr).    (4.13)

Note that for each spatial step, the graphical model is used to calculate the parameters µ_t(θ), σ_t(θ) and λ_t(θ).
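Because the per-time-step likelihood factorises into a Gaussian term for speed and a Poisson term for licks, the log-likelihood of Equation 4.13 can be accumulated directly. The sketch below assumes the per-time-step parameters µ_t(θ), σ_t(θ) and λ_t(θ) have already been computed from the graphical model; in practice they would be recomputed inside the objective for each candidate θ before handing the negative log-likelihood to a generic optimiser such as scipy.optimize.minimize. The data arrays are placeholders.

```python
import numpy as np
from scipy.stats import norm, poisson

def log_likelihood(speeds, licks, mu_t, sigma_t, lam_t):
    """Objective of Eq. 4.13: Gaussian log-density of speed plus Poisson log-mass of licks."""
    return (norm.logpdf(speeds, loc=mu_t, scale=sigma_t).sum()
            + poisson.logpmf(licks, mu=lam_t).sum())

# Placeholder data and per-time-step parameters (normally derived from theta)
speeds = np.array([12.1, 13.0, 8.7])
licks = np.array([0, 2, 4])
mu_t = np.array([12.0, 12.5, 9.0])
sigma_t = np.full(3, 1.0)
lam_t = np.array([0.5, 1.5, 3.0])
print(log_likelihood(speeds, licks, mu_t, sigma_t, lam_t))
```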
Chapter 5
Discussion

The next step in the project is first to complete the model validation on synthetic data. Before applying the model to real data, it is important to scrutinise the behaviour of the generative model. We plan to do so by pre-determining values for the model parameters and generating synthetic behavioural data. The generated behaviour is then used as a given data set. If the model is well behaved, the model parameters should be recoverable from the synthetic data. As different spatial state structures radically alter the learning dynamics, we will conduct the parameter recovery for the spatial model parameters particularly diligently. By considering various alternative hypotheses (different values for d and V), the model evidence (Equation 4.12) of the alternative hypotheses will be compared. For a well-behaved model, the model evidence is expected to be best for the parameters used to generate the data.

5.1 Limitations

While our model assumes fully observable Markov states, noisy observations of the location and visual stimuli introduce uncertainty about the true current state of the system. Indeed, observations of the environment are often noisy, and some behavioural models take this into account (Kang et al. n.d., Kersten & Mamassian 2009). While the learning rates of the reward probability and the transition probability capture some aspects of noisy observations, they are not based on normative assumptions. Alternatives should be considered in future research (Laquitaine & Gardner 2018). Fortunately, there is an extensive body of research on partially observable Markov decision processes (Monahan 1982, Kaelbling et al. 1996) that would provide a clear path for improving the current model.
An alternative to estimating the model parameters by MLE, as in Chapter 4.4, is to use maximum a posteriori estimation (MAP) (Murphy 2012, Griffiths & Yuille 2008). In contrast to MLE, which gives one estimated value for each parameter, MAP gives a distribution for each parameter, characterising the level of uncertainty about it. Since some of the model parameters are qualitatively interpretable, MAP may be particularly relevant. In particular, a distribution over the possible options for V, the set of discriminated visual stimuli, is highly relevant to the imaged activity of the visual cortex. The potential challenge of MAP is that the computational difficulty of the calculation may introduce implementation challenges that are hard to resolve. Nonetheless, its estimates of the model parameters are potentially more meaningful for studying visual perception.

5.2 Implications

During the experiments, two-photon calcium imaging and optogenetics were performed to determine changes in the inputs and activity of individual excitatory and inhibitory cells within the primary visual cortex. Previously, a multivariate auto-regressive linear model (MVAR) was fitted to the neuronal data (Khan et al. 2018):

q_{t+1} = q_t + A q_t + u_t + \xi v_t,

where q_t is the vector of response levels at time step t for all n imaged neurons, A is an n × n matrix that contains the fitted interaction parameters, u_t is a fitted vector for the stimulus-related input, and ξ is a fitted parameter for the contribution of the current speed v_t. The MVAR model was used to compare the activity of populations of different inhibitory and excitatory cell types.
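As a point of reference for how such a regression is structured, here is a minimal sketch of one step of the MVAR update described above, with an optional extra regressor of the kind proposed below (for instance the expected reward probability µ(r_k)). The matrices and coefficients are random placeholders, not fitted values from Khan et al. (2018), and the extension term is a hypothetical illustration.

```python
import numpy as np

rng = np.random.default_rng(3)
n = 5                                   # number of imaged neurons (placeholder)
A = 0.05 * rng.standard_normal((n, n))  # interaction matrix (placeholder, not fitted)
xi = 0.1 * rng.standard_normal(n)       # contribution of the current speed v_t
phi = 0.2 * rng.standard_normal(n)      # hypothetical weight of an internal-representation term

q_t = rng.standard_normal(n)            # response levels at time step t
u_t = rng.standard_normal(n)            # stimulus-related input
v_t, mu_rk = 14.0, 0.6                  # current speed and expected reward probability

q_next = q_t + A @ q_t + u_t + xi * v_t      # MVAR step as fitted in Khan et al. (2018)
q_next_ext = q_next + phi * mu_rk            # hypothetical extension with mu(r_k) as a regressor
print(q_next, q_next_ext)
```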
The only behavioural term that was included was the speed v_t, which did not make a significant contribution. An immediate application of the behavioural model presented in this report is therefore to improve the MVAR model by including parameters related to the internal representations. In particular, the learned parameters that are likely to be relevant to behaviour, namely the expected reward probability µ(r_k), the variance σ²(r_k), and the discounted future reward γ_k(ρ), could improve the predictive power of the MVAR model. If the internal representation terms from the behavioural model improve the predictive power of the MVAR model, this will give new insights into the information encoded by neurons in the primary visual cortex. Future experiments can then be designed to systematically manipulate these internal terms in order to understand the precise representations (Heilbron et al. 2020). This will help us understand how the structure of the environment changes learning dynamics and internal representations.

Bibliography

Barlow, H. B. et al. (1961), 'Possible principles underlying the transformation of sensory messages', Sensory communication 1, 217–234.
Beck, J. M., Ma, W. J., Kiani, R., Hanks, T., Churchland, A. K., Roitman, J., Shadlen, M. N., Latham, P. E. & Pouget, A. (2008), 'Probabilistic population codes for Bayesian decision making', Neuron 60(6), 1142–1152.

Berkes, P., Orbán, G., Lengyel, M. & Fiser, J. (2011), 'Spontaneous cortical activity reveals hallmarks of an optimal internal model of the environment', Science 331(6013), 83–87.

Bishop, C. M. (2006), Pattern Recognition and Machine Learning, Springer.
Chen, T.-W., Wardill, T. J., Sun, Y., Pulver, S. R., Renninger, S. L., Baohan, A., Schreiter, E. R., Kerr, R. A., Orger, M. B., Jayaraman, V. et al. (2013), 'Ultrasensitive fluorescent proteins for imaging neuronal activity', Nature 499(7458), 295–300.

Damodaran, A. (2012), Investment Valuation: Tools and Techniques for Determining the Value of Any Asset, Vol. 666, John Wiley & Sons.
Dombeck, D. A., Harvey, C. D., Tian, L., Looger, L. L. & Tank, D. W. (2010), 'Functional imaging of hippocampal place cells at cellular resolution during virtual navigation', Nature Neuroscience 13(11), 1433–1440.

Fiser, J., Berkes, P., Orbán, G. & Lengyel, M. (2010), 'Statistically optimal perception and learning: from behavior to neural representations', Trends in Cognitive Sciences 14(3), 119–130.

Fishell, G. & Kepecs, A. (2019), 'Interneuron types as attractors and controllers', Annual Review of Neuroscience 43.

Geisler, W. S. (2003), 'Ideal observer analysis', The Visual Neurosciences 10(7), 12–12.

Geisler, W. S. (2011), 'Contributions of ideal observer theory to vision research', Vision Research 51(7), 771–781.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2008), ‘A primer on probabilistic inference’, The probabilistic mind: Prospects for Bayesian cognitive science pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' 33–57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Häggström, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2002), Finite Markov chains and algorithmic applications, Vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' 52, Cambridge University Press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Harvey, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Collman, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Dombeck, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Tank, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2009), ‘Intracellular dynam- ics of hippocampal place cells during virtual navigation’, Nature 461(7266), 941–946.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Heeger, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2017), ‘Theory of cortical function’, Proceedings of the National Academy of Sciences 114(8), 1773–1782.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Heilbron, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Richter, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Ekman, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Hagoort, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & De Lange, F.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2020), ‘Word contexts enhance the neural representation of individual letters in early visual cortex’, Nature communications 11(1), 1–11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Kaelbling, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Littman, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Moore, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (1996), ‘Reinforcement learning: A survey’, Journal of artificial intelligence research 4, 237–285.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Kang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Mahr, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Nagy, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Andrási, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Csibra, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Lengyel, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content='), ‘Eye movements reflect causal inference during episodic memory retrieval’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Kepecs, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Fishell, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2014), ‘Interneuron cell types are fit to function’, Nature 505(7483), 318–326.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Kersten, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Mamassian, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2009), ‘Ideal observer theory’, Encyclopedia of neuroscience 5, 89–95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Khan, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Poort, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Chadwick, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Blot, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Sahani, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Mrsic-Flogel, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Hofer, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2018), ‘Distinct learning-induced changes in stimulus selectivity and interactions of gabaergic interneuron classes in visual cortex’, Nature neuroscience 21(6), 851–859.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Kleinfeld, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Ahissar, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Diamond, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2006), ‘Active sensation: insights from the 38 Bibliography rodent vibrissa sensorimotor system’, Current opinion in neurobiology 16(4), 435–444.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Kriegeskorte, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Douglas, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' K.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2018), ‘Cognitive computational neuroscience’, Nature neuroscience 21(9), 1148–1160.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Laquitaine, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Gardner, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2018), ‘A switching observer for human perceptual estimation’, Neuron 97(2), 462–474.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Maloney, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Mamassian, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2009), ‘Bayesian decision theory as a model of human visual perception: Testing bayesian transfer’, Visual neuroscience 26(1), 147–155.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Monahan, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (1982), ‘State of the art—a survey of partially observable markov decision processes: theory, models, and algorithms’, Management science 28(1), 1–16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Murphy, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2012), Machine learning: a probabilistic perspective, MIT press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Orbán, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Fiser, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Aslin, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Lengyel, M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2008), ‘Bayesian learning of vi- sual chunks by human observers’, Proceedings of the National Academy of Sciences 105(7), 2745–2750.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Poort, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Khan, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Pachitariu, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Nemri, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Orsolic, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Krupic, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Bauza, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Sahani, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Keller, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Mrsic-Flogel, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2015), ‘Learning enhances sensory and multiple non-sensory representations in primary visual cortex’, Neuron 86(6), 1478–1490.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Pouget, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Dayan, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Zemel, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2003), ‘Inference and computation with population codes’, Annual review of neuroscience 26(1), 381–410.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Saleem, A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Diamanti, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Fournier, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Harris, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Carandini, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2018), ‘Coherent encoding of subjective spatial position in visual cortex and hippocampus’, Nature 562(7725), 124–127.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Sutton, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Barto, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2018), Reinforcement learning: An introduction, MIT press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' Yan, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Rasch, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Chen, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Xiang, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Huang, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=', Wu, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' & Li, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE0T4oBgHgl3EQfygIs/content/2301.02659v1.pdf'} +page_content=' (2014), ‘Perceptual training continuously refines neuronal population codes in primary visual cortex’, Nature neuroscience 17(10), 1380–1387.' 
diff --git a/EdE4T4oBgHgl3EQfGgyE/content/tmp_files/2301.04895v1.pdf.txt b/EdE4T4oBgHgl3EQfGgyE/content/tmp_files/2301.04895v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..d1233c03d3681125b96c5732978a1f8477ca600c --- /dev/null +++ b/EdE4T4oBgHgl3EQfGgyE/content/tmp_files/2301.04895v1.pdf.txt @@ -0,0 +1,1441 @@

MNRAS 000, 1–12 (2022)    Preprint 13 January 2023    Compiled using MNRAS LATEX style file v3.0

Distribution and dynamics of decimeter-sized dust agglomerates in the coma of 67P/Churyumov–Gerasimenko

Pablo Lemos,1,2★ Jessica Agarwal1,2 and Matthias Schröter3
1 Institut für Geophysik und Extraterrestrische Physik, Technische Universität Braunschweig, Mendelssohnstraße 3, Braunschweig 38106, Germany.
2 Max Planck Institute for Solar System Research, Justus-von-Liebig-Weg 3, Göttingen 37077, Germany.
3 Max Planck Institute for Dynamics and Self-Organization, Am Faßberg 17, D-37077 Göttingen, Germany.

Accepted XXX.
Received YYY; in original form ZZZ
★ E-mail: j.lemos-velazquez@tu-braunschweig.de

ABSTRACT
We present a method to analyze images of the coma of 67P/Churyumov–Gerasimenko obtained with OSIRIS, the main imaging system onboard Rosetta, in which dust aggregates appear as bright tracks because of their relative velocity with respect to the spacecraft. We applied this method to 105 images taken in 2015 July, 2015 December and 2016 January, identifying more than 20000 individual objects. We performed a photometric analysis of these objects and derived their phase function. This phase function follows the same trend as the one found for the nucleus, consistent with the detected particles having sizes larger than ∼ 1 mm. Additionally, the phase function becomes shallower with increasing heliocentric distance, indicating a decrease in the mean agglomerate size. In order to characterize the agglomerates observed in the images, we developed a simplified model of their ejection and dynamics in the coma and generated synthetic images based on it. We solved the inverse problem by finding the simulation parameters that give the best fit between synthetic and real images. In doing so, we obtained a mean agglomerate size of the order of a decimetre and an initial speed of ≃ 1 m s⁻¹. Both decrease with increasing heliocentric distance, a sign of the reduction in activity. Moreover, the sizes obtained from the comparison are not compatible with ejection driven by water activity alone, so other sources, mainly CO2, have to be invoked.

Key words: methods: data analysis – methods: numerical – comets: individual: 67P/Churyumov–Gerasimenko.

1 INTRODUCTION
The Rosetta mission provided data with unprecedented detail on comet 67P/Churyumov–Gerasimenko (hereafter 67P) by sampling its environment in situ during a period of around two years. In particular, cometary dust particles over a wide range of sizes were collected, analyzed and characterized by the MIDAS (particles in the range µm to tens of µm; Mannel et al. 2019; Longobardo et al. 2022), COSIMA (tens of µm – hundreds of µm; Merouane et al. 2017) and GIADA (hundreds of nm – tens of mm; Della Corte et al. 2019; Longobardo et al. 2022) instruments. Larger objects (≳ 1 cm) could be detected by the main imaging system onboard Rosetta, the Optical, Spectroscopic and Infrared Remote Imaging System (OSIRIS, Keller et al. 2007). The data obtained by OSIRIS make it possible to derive information about the morphological and dynamical properties of the dust and, in case the same object can be identified in more than one image taken with different filters, also about its colour, which can give hints about its composition (Frattin et al. 2017; Kwon et al. 2022).

However, remotely analyzing individual dust particles or aggregates in the coma faces a fundamental issue: the distance from the sensor to the object is unknown, so its size and velocity, and hence the size and mass distributions, cannot be uniquely determined. Several works deal with this issue in different ways. Rotundi et al. (2015) and Fulle et al. (2016) assume that the motion of the objects is entirely radial from the nucleus and that the apparent motion with respect to the camera comes mainly from the spacecraft velocity; under those assumptions, the distance can be determined using the parallax effect. Agarwal et al. (2016) and Pfeifer et al. (2022) use images where the nucleus limb is present and focus on agglomerates moving away from it.
These agglomerates have a higher probability of having been recently ejected, so it can be assumed that they are at the same distance as the nucleus. Drolshagen et al. (2017) and Ott et al. (2017) exploit the fact that the two detectors of OSIRIS, the Narrow (NAC) and Wide (WAC) Angle Cameras, are separated by ≃ 70 cm on the spacecraft, so if both cameras detect the same object, the parallax effect can be used to measure its distance to the camera. Güttler et al. (2017) note that objects closer than ∼ 100 m appear unfocused in WAC images, and they develop a method to measure the distance to objects close to the camera from the apparent size of the unfocused pattern, which is directly related to that distance. Finally, Frattin et al. (2021) use a mixed approach, constraining the sizes and distances of the dust agglomerates based on speed distributions taken from some of the works listed before, in combination with photometric simulations.

The approach of this work is different from that of its predecessors. Instead of looking for an alternative method for determining the distance, we propose to bypass this requirement by using a combination of observations and statistical modelling. On the one hand, images taken by OSIRIS are analyzed in order to obtain a set of observables from the distribution of dust agglomerates present in each of them. On the other hand, we simulate the trajectories of dust agglomerates through the coma using a simplified ejection and dynamical model. These trajectories are characterized by different dust parameters, such as size, density and initial velocity. Based on these simulations and the spacecraft position and orientation, a group of synthetic images of the agglomerates as seen by OSIRIS is generated. Using these synthetic images, the inverse problem is solved by optimizing the parameter choice of the dynamical simulations in order to reproduce properties of the dust agglomerate trajectories observed in the real images. This approach also differs from those applied in previous works in that the properties of the entire population of detected objects are analyzed statistically, rather than dealing with individual objects.

The work is organized as follows: in Section 2 the datasets and the dust agglomerate detection method are described. The dynamical model and the synthetic image generation are explained in Section 3. In Section 4 we present the analysis of the properties of dust agglomerates found in OSIRIS images. In Section 5 the synthetic images are compared with the real ones. Finally, we present our conclusions in Section 6.

2 OBSERVATIONS AND TRACK DETECTION
Rosetta escorted 67P from 2014 August, when its heliocentric distance was ≃ 3.7 au inbound, to 2016 September, when it was outbound at ≃ 3.8 au from the Sun. For this work we focus on three image sets obtained with the OSIRIS NAC around perihelion. All these image sequences were obtained under the operational activity DUST_PHASE_FUNCTION, originally devoted to the analysis of the dusty coma brightness as a function of the phase angle (i.e. the angle between the Sun–spacecraft direction and the camera pointing direction) and the wavelength.
In order to achieve this, the observing conditions were such that the distance from the nucleus to the spacecraft remained nearly constant throughout the acquisition, while the camera pointing scanned the coma at different phase angles. The plane of observation was nearly perpendicular to that containing the Sun, the nucleus and the spacecraft. A sketch of the observation geometry can be seen in Fig. 1.

The image sets used in this work were acquired on 2015 July 7, 2015 December 14 and 2016 January 21, at heliocentric distances of 1.32 au inbound, 1.89 au outbound and 2.18 au outbound, respectively. All three sets were taken using the Blue F24 (peak transmission at 480.7 nm), Orange F22 (649.2 nm) and Red F28 (743.7 nm) filters. A binning of 4 × 4 was used for all images, so the final image size is 512 × 512 pixels. A summary of the observing conditions can be found in Table 1.

Table 1. Image sets used for this work. Columns represent the mid- and short-term planning cycles (intervals of roughly one month and one week), date of acquisition, filters and exposure times used, heliocentric distance, nucleocentric distance, and number of images in the set.
Planning cycle (MTP/STP) | Date       | F (t_exp)                           | r_h (au) | r_S/C (km) | #I
018/063                  | 2015-07-07 | F22 (7 s), F24 (73 s), F28 (40 s)   | 1.32     | 153.4      | 45
023/086                  | 2015-12-14 | F24 (73 s), F22 (7 s), F28 (40 s)   | 1.89     | 102.6      | 21
025/092                  | 2016-01-21 | F24 (146 s), F22 (14 s), F28 (80 s) | 2.18     | 79.2       | 39

Depending on the heliocentric distance and the filter used, the images were obtained with exposure times ranging from 7 to 146 seconds. These exposure times, combined with the nonzero relative velocity between the spacecraft and the dust agglomerates, result in the latter appearing in the images not as point sources but as elongated tracks. This fact will be exploited later, at the object detection stage.

A total of 105 level 3F images were used for this work. These images are radiometrically calibrated, corrected for geometric distortion and for solar and in-field stray light, and expressed in reflectance units, i.e. the corrected flux is normalized by the solar flux at the corresponding heliocentric distance. A detailed description of the data processing steps can be found in Tubiana et al. (2015). Despite being corrected for stray-light effects, some of the high phase angle images present illumination artefacts that complicate the track detection. This problem is more evident in images taken at phase angles greater than 100°, that is, when the camera pointing is closer to the Sun direction, so the results in this range should be treated with caution.

Figure 1. Sketch of the observation geometry. The solar (blue dashed) and spacecraft (red dotted) directions with origin in the nucleus form a perpendicular angle. The pointing of the camera, shown as violet solid lines, scans the coma at different phase angles in the plane roughly perpendicular to that of the spacecraft, nucleus and Sun. Note that the image is not to scale.

2.1 Detection method
A semi-automatic detection method based on the one presented in Frattin et al. (2017) was used. The steps involved in this method are:

• A similarity map SM_θ is created using a track template T_θ. These templates consist of a square window of 10 pixels in length, where a straight line representing the track passes through the centre of the template. The orientation angle θ, defined as θ = arctan(−1/m), where m is the slope of the line (this definition is chosen to match the one used by the Hough transform later in the algorithm), successively takes all the values in the [−90°, +89°] range, with steps of 4°. SM_θ is calculated as the normalized cross-correlation (NCC) between each image I and the template T_θ, followed by the convolution of the result with the same template:

$SM_\theta = (I \,\bar{\otimes}\, T_\theta) \otimes T_\theta$.  (1)
• Binary images are generated from the similarity maps for each orientation by imposing a lower threshold defined as J + 2S, where J and S represent the local median and standard deviation of the NCC, respectively. Nonzero pixels in these binary images represent locations in the image with a high probability of containing a track with the corresponding orientation.
• Tracks are detected from each binary image using a Hough transform method (Hough 1962; Duda & Hart 1972). The outcome of this step is called the nominal track.
• To characterize the nominal tracks, segments perpendicular to the track are analyzed. The centres of the segments are equally spaced on the track, with a distance of 1/3 pixel between them. Brightness profiles are then generated by interpolating the image values over the segment positions. For each profile, two parameters are defined: its brightness peak value, and the residual distance to the nominal track, defined as the distance in pixels from the nominal track to the peak position along the mentioned segment. Once these parameter pairs are defined for all segments, the track is characterized by a boundary region, i.e. a region in the brightness–residual space enclosed by the convex hull of all the pairs, extended by the standard deviation along each axis (Fig. 2).
• The nominal tracks are corrected for incomplete detection. First, the nominal track is preliminarily extended by 5 pixels. Then, the brightness–residual pairs are defined for the extended part and compared with the boundary region defined in the previous step. If the points corresponding to the extended part lie inside the boundary region, the line is extended. The process is repeated until the added points no longer belong to the region or the image edge is reached. An example of this process is shown in Fig. 2.
• The extended tracks are analyzed in the search for duplicate detections. This is done by comparing the pixels spanned by the tracks. If two extended tracks share more than 70% of their pixels, the tracks are merged.
• A manual inspection and correction of the results is performed.

Using this method, a total of 20033 tracks were detected. This number is larger by at least an order of magnitude than in previous studies focused on detecting and analyzing this type of track in similar images. It is worth noting that, since the track templates T_θ have a size of 10 pixels, our algorithm is unable to find tracks shorter than that length. A smaller template size would have allowed shorter tracks to be detected, but it would also increase the chance of mistakenly identifying a group of bright background pixels as a real track.
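The core of this pipeline (similarity map, thresholding and Hough detection) can be summarized in a short sketch. The snippet below is only an illustration of the technique, not the code used for this work: it relies on scikit-image and SciPy, uses a global rather than local estimate of the median J and standard deviation S, and the relation between θ and the drawn template direction is an assumed convention.

import numpy as np
from scipy import ndimage
from skimage.draw import line as draw_line
from skimage.feature import match_template
from skimage.transform import probabilistic_hough_line

def line_template(theta_deg, size=10):
    # square window with a straight line through its centre
    t = np.zeros((size, size))
    c = (size - 1) / 2.0
    dr, dc = -np.cos(np.radians(theta_deg)), np.sin(np.radians(theta_deg))
    r0, c0 = int(round(c - dr * c)), int(round(c - dc * c))
    r1, c1 = int(round(c + dr * c)), int(round(c + dc * c))
    rr, cc = draw_line(r0, c0, r1, c1)
    t[rr, cc] = 1.0
    return t

def detect_tracks(image, thetas=np.arange(-90, 90, 4)):
    segments = []
    for theta in thetas:
        T = line_template(theta)
        ncc = match_template(image, T, pad_input=True)   # normalized cross-correlation of I with T
        sm = ndimage.convolve(ncc, T, mode="nearest")    # Eq. (1): convolve the NCC map with T again
        binary = sm > np.median(sm) + 2.0 * sm.std()     # J + 2S threshold (global stand-in)
        segments += probabilistic_hough_line(binary, threshold=10,
                                             line_length=10, line_gap=3)
    return segments

The extension, merging and manual-inspection steps described above would then operate on the line segments returned by this function.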
In order to check whether this choice of template size introduces a bias in the detected tracks, we examined the properties a dust agglomerate must have to generate a track of this length. The projected track length in pixels, l_pix, depends on the agglomerate projected speed v, its distance to the camera d and the image exposure time t_exp. The equation describing the track length as a function of these variables is

$l_{\rm pix} = \frac{t_{\rm exp}}{d\, R_{\rm NAC}}\, v$,  (2)

where R_NAC is the angular resolution of the camera. For image sets similar to the ones used here, Frattin et al. (2021) estimated a maximum agglomerate-to-camera distance of 18 km. At that distance, the minimum agglomerate projected speed needed to generate a track longer than 10 pixels is 0.4 m s⁻¹ for images taken with the Blue and Red filters, but 1 m s⁻¹ for the ones corresponding to the Orange filter. Since Ott et al. (2017) found that the median apparent speed of this type of agglomerate is 0.6 m s⁻¹, we can conclude that the tracks detected in the images taken with the Orange filter sample a population of agglomerates that has a higher relative speed with respect to the spacecraft, is closer to the camera, or a combination of both.

Figure 2. Example of the track correction and extension method. Top: the orange points represent the positions of the peak values from the segments perpendicular to the track, which was obtained from the Hough transform. The detected track fails to cover the entire length on the left side (the gaps in between are caused by overlapping background stars). Using these positions and brightness values, the boundary region is defined in the bottom panel. The same procedure is applied to profiles in the extended region. The violet symbols from the extended track to the left lie inside this boundary region, so the line is extended. On the contrary, the green points on the right side do not.
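To make Eq. (2) concrete, the small helper below inverts it for the minimum projected speed that still produces a track of a given length. It is a sketch only: the adopted pixel scale (18.6 µrad for the unbinned NAC, multiplied by the 4 × 4 binning) is an assumed value and is not quoted in the text above, so the resulting numbers depend on that assumption.

NAC_PIXEL_SCALE = 4 * 18.6e-6   # rad per binned pixel (assumed value)

def min_projected_speed(l_pix, d, t_exp, r_nac=NAC_PIXEL_SCALE):
    # invert Eq. (2): v = l_pix * d * R_NAC / t_exp
    # l_pix in binned pixels, d in m, t_exp in s -> speed in m/s
    return l_pix * d * r_nac / t_exp

# example: a 10-pixel track at 18 km seen with a 73 s exposure
v_min = min_projected_speed(10, 18e3, 73.0)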
3 IMAGE MODELLING

3.1 Dust dynamics model
Synthetic images were generated by modelling the trajectories of different types of dust agglomerates in the comet coma and looking for intersections with the camera FOV. A simplified model for computing the dust agglomerate trajectories was developed. This model is initially formulated in 2D; assuming the activity is axially symmetric with respect to the solar direction, the 3D trajectories are obtained by rotating the 2D ones around the solar direction by a random angle. In this model, the nucleus is represented by a sphere of radius R_N = 2000 m and mass M_N = 9.982 × 10¹² kg. The dust agglomerates are assumed to be spherical with radius r_d and density ρ_d, and are under the influence of three forces: nucleus gravity F_G, radiation pressure F_R and gaseous drag F_D, expressed as

$\mathbf{F}_G = -\frac{G M_N m}{r^2}\,\frac{\mathbf{r}}{r}$,  (3)

$\mathbf{F}_R = -\frac{c_\odot Q_{RP}\, \pi r_d^2}{r_h^2\, c}\,\frac{\mathbf{r}_h}{r_h}$,  (4)

$\mathbf{F}_D = \frac{|\mathbf{v}_g - \mathbf{v}_d|^2}{2}\,\rho_g\, \pi r_d^2\, C_D\,\frac{\mathbf{V}}{V}$,  (5)

where m = (4/3)π ρ_d r_d³ is the object mass, r is the position of the object with respect to the nucleus, V = v_g − v_d is the relative velocity between the agglomerate and the gas, c_⊙ = 1361 W m⁻² is the solar constant, Q_RP is the scattering efficiency for radiation pressure (assumed to be equal to 1), r_h is the heliocentric distance expressed in au, c is the speed of light, v_g and ρ_g are the gas velocity and density respectively, and C_D is the drag parameter, calculated using the free-molecular expression (Bird 1994) as

$C_D = \frac{2s^2+1}{s^3\sqrt{\pi}}\,\exp(-s^2) + \frac{4s^4+4s^2-1}{2s^4}\,\mathrm{erf}(s) + \frac{2\sqrt{\pi}}{3s}\sqrt{\frac{T_d}{T_g}}$,  (6)

where $s = V/\sqrt{2 T_g k_B/m_g}$, and the dust temperature T_d is assumed to be equal to the gas temperature T_g. Computing the gas drag force requires a description of the density and velocity of the gas in the coma, for which an intermediate step needs to be included (see Section 3.2).

The initial position of the dust agglomerates is chosen from a probability distribution function that has the same dependence on the subsolar angle as the gas production rate, obtained from the model by Fulle et al. (2020) (see Section 3.2). The initial speeds are drawn from a Maxwell–Boltzmann distribution,

$f(v) = \frac{4}{\sqrt{\pi}}\,\frac{v^2}{v_P^3}\,\exp(-v^2/v_P^2)$,  (7)

where v_P is the most probable speed. In order to represent the surface roughness in a simplified way, we include a tangential component in the initial velocity, such that its direction forms an angle θ_i with the local normal. θ_i is chosen from a normal distribution centred on the free parameter θ_d and with a standard deviation of 20°, except for the case θ_d = 0°, when all the agglomerates start with radial velocities.
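A minimal sketch of the acceleration entering Eqs (3)–(6) is given below. It assumes a water-dominated gas for the molecular mass m_g, sets T_d = T_g as above, leaves the gas state (ρ_g, v_g, T_g) to be supplied by the hydrodynamic solution of Section 3.2, and chooses the sign of the radiation-pressure term so that the grain is pushed away from the Sun; all of these are assumptions made for illustration.

import numpy as np
from scipy.special import erf

G_GRAV = 6.674e-11      # m^3 kg^-1 s^-2
C_SUN = 1361.0          # W m^-2, solar constant
C_LIGHT = 2.998e8       # m s^-1
K_B = 1.381e-23         # J K^-1
M_NUC = 9.982e12        # kg, nucleus mass
M_GAS = 2.99e-26        # kg, molecular mass (water vapour assumed)

def drag_coefficient(V, T_g, T_d):
    # free-molecular drag coefficient of a sphere, Eq. (6)
    s = V / np.sqrt(2.0 * K_B * T_g / M_GAS)
    return ((2 * s**2 + 1) / (s**3 * np.sqrt(np.pi)) * np.exp(-s**2)
            + (4 * s**4 + 4 * s**2 - 1) / (2 * s**4) * erf(s)
            + 2 * np.sqrt(np.pi) / (3 * s) * np.sqrt(T_d / T_g))

def acceleration(r, v, r_d, rho_d, gas_state, sun_dir, r_h_au, Q_rp=1.0):
    # sum of Eqs (3)-(5) divided by the agglomerate mass;
    # gas_state(r) must return (rho_g, v_g, T_g) from the solution of Sec. 3.2,
    # sun_dir is the unit vector pointing from the comet towards the Sun
    m = 4.0 / 3.0 * np.pi * rho_d * r_d**3
    a_grav = -G_GRAV * M_NUC / np.dot(r, r) * r / np.linalg.norm(r)
    a_rad = -(C_SUN * Q_rp * np.pi * r_d**2) / (r_h_au**2 * C_LIGHT * m) * sun_dir
    rho_g, v_g, T_g = gas_state(r)
    V = v_g - v
    V_norm = np.linalg.norm(V)
    a_drag = (V_norm**2 / 2.0) * rho_g * np.pi * r_d**2 \
             * drag_coefficient(V_norm, T_g, T_g) * V / V_norm / m
    return a_grav + a_rad + a_drag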
The dust agglomerates are thus characterized by four parameters: their density ρ_d, radius r_d, most probable initial speed v_P and most probable initial direction θ_d from the surface normal. The dynamical simulations were carried out individually for each combination of those parameters; the values used for each parameter are listed in Table 2. A total of 1176 parameter combinations were used for the dynamical integrations, except in the case of STP063, where 1470 combinations were used. Even though some of these combinations do not represent any physically meaningful particle, they are nonetheless simulated in order to better understand the impact of the parameter choice on the results.

Table 2. Values used in the dynamical simulations for each dust parameter. The radii marked with an asterisk were only used for the set STP063.
Parameter | Values
ρ_d | [1; 10; 50; 100; 200; 500; 800] kg m⁻³
r_d | [0.01; 0.05; 0.1; 0.5; 1; 5; 10; 50; 80*; 100*] cm
v_P | [0; 0.5; 1.0; 2.0; 5.0; 10.0] m s⁻¹
θ_d | [0; 20; 40; 60] °

3.2 Gas model
The gas simulations are done in two parts. First, the gas production rate is calculated based on the model presented by Fulle et al. (2020). This model assumes the nucleus surface to be composed of cm-sized pebbles with water ice sublimating inside them. When the surface temperature is larger than 205 K, the pressure inside the pebbles is high enough to overcome their tensile strength, making dust ejection possible. Using the heliocentric distances obtained from the headers of the images, the production rate as a function of the subsolar angle is calculated (Fig. 3).

For the second part, this production rate is used as a boundary condition for the hydrodynamic simulations of the distribution of gas in the coma. As in previous works (Zakharov et al. 2018, 2021), the initial speed of the gas at the nucleus surface is set to the local sound speed. The gas flow is modelled through the Euler equations, which imply that the gas is considered to be ideal, in equilibrium and without viscous dissipation or heat conduction. The hydrodynamic simulations are carried out in two dimensions using the code PLUTO (Mignone et al. 2007) until a static solution is achieved (Fig. 3).

Figure 3. Gas simulations for the set STP092. Top: production rate per unit area and surface temperature as a function of the insolation angle. The dashed line indicates the 205 K limit above which dust ejection is possible. Bottom: static solution for the gas flow. From top left to bottom right, the panels represent density, pressure, radial and tangential velocities in arbitrary units. The nucleus is at the origin, the illumination comes from the positive x axis and the distances are expressed in units of R_N.

3.3 Generation of synthetic images
Using the results of the gas simulations discussed in Section 3.2, the system described by equations 3–5 can be numerically solved. The two-dimensional dust agglomerate trajectories obtained from the dynamical modelling are then transformed into three dimensions by using the symmetry assumptions mentioned in Section 3.1. These three-dimensional trajectories are checked for possible intersections with the camera FOV. If such an intersection occurs, two intersection points, entry and exit, are defined. A random position r1 inside the FOV is selected from a linear interpolation between the intersection points. This will be used as one of the endpoints of the synthetic track. The remaining endpoint is defined as r2 = r1 ± v1 t_exp, where v1 is the interpolated velocity at r1, t_exp is the image exposure time and the sign is chosen randomly. While r1 is enclosed within the camera FOV, that is not necessarily the case for r2. Both endpoints are then projected onto the detector plane, obtaining the projected track. The last step involves checking whether the track would be bright enough to be detected. For this, tracks for which the mean distance between r1,2 and the camera is larger than a limiting distance Δ are discarded. This limiting distance is calculated from the equation (Agarwal et al. 2016)

$\Delta = \sqrt{\frac{r_d^2\, p\, \Phi(\alpha)\, I_\odot}{J\, r_h^2}}$,  (8)

where p and Φ(α) are the geometric albedo and phase function of the agglomerate respectively, I_⊙ is the solar flux in the corresponding filter with units of W m⁻² nm⁻¹, r_h is the heliocentric distance in au and J is the image background brightness, estimated as its median, with units of W m⁻² nm⁻¹.
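Eq. (8) translates directly into the small helper below, shown here only to illustrate the brightness cut; the interpretation that I_⊙ is evaluated at 1 au (with the r_h⁻² factor applied separately) is an assumption based on the wording above.

import numpy as np

def limiting_distance(r_d, p, phi_alpha, I_sun, J, r_h):
    # Eq. (8): maximum agglomerate-camera distance for a detectable track
    # r_d in m, I_sun and J in W m^-2 nm^-1, r_h in au
    return np.sqrt(r_d**2 * p * phi_alpha * I_sun / (J * r_h**2))

# a synthetic track is kept only if its mean distance to the camera
# does not exceed this limit:
# keep = mean_track_distance <= limiting_distance(r_d, p, phi, I_sun, J, r_h)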
4 ANALYSIS OF THE DETECTED TRACKS

4.1 Orientation
First, the distribution of the orientation angles of the tracks is analyzed by grouping them into 20 bins spanning the [−90°, +89°] interval and normalizing the histogram such that the sum of the bar heights equals one. Then, a modified von Mises distribution is used to fit the histogram. The von Mises distribution is an approximation of the normal distribution for a periodic domain, expressed by the equation

$f(x) = \frac{\exp\left[\cos(x-\mu)/\sigma^2\right]}{2\pi\, I_0(1/\sigma^2)}$,  (9)

where μ and σ represent the mean and standard deviation respectively, and I_0 is the modified Bessel function of the zeroth order. This function is defined over the [0, 2π] domain, so it is rescaled to match the domain of the orientation angles. An example of the normalized histogram of the orientation angles and the corresponding von Mises fit can be seen in Fig. 4.

Figure 4. Normalized histogram of the orientation angles of the tracks detected in the image taken in STP092 at a phase angle of 50° with the Red filter. The bottom panel shows the von Mises fit obtained for the data.

The mean orientation angle of the tracks in the images as a function of the phase angle can be seen in Fig. 5. We find that there is a difference between the mean direction of the tracks and the radial direction, the latter being defined as the direction of the spacecraft–nucleus vector projected onto the image plane. From this figure we notice that this discrepancy shows two particular features. First, the deviation is not constant within each set, but depends on the phase angle. Second, in almost all cases, the deviation from the radial direction depends on the exposure time of the images: the shorter the exposure time, the closer the mean direction of the tracks is to the radial one. In Section 5.1 we discuss the origins of these features.

Figure 5. Mean direction of the tracks in the images. The symbol represents the mean and the error bar the standard deviation of the von Mises distribution fitted to the orientation angle histogram.

4.2 Phase function
For computing the phase function of the tracks, photometry of all tracks completely enclosed in the image was performed. The method is similar to the one described in Güttler et al. (2017), namely performing a morphological dilation of the original track with two ring sizes, in order to obtain two stadium shapes enclosing the track. The size of the discs used for the dilation is estimated from the local gradient image G (Fig. 6), calculated as $G = \sqrt{G_x^2 + G_y^2}$, where G_x and G_y are the directional gradients obtained using a Gaussian kernel. The pixels contained in the inner shape are summed to obtain the total track brightness, while the background is estimated as the median value of the pixels between both shapes and subtracted from the brightness of the central shape.
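A sketch of this two-aperture photometry is given below, assuming the track pixels are available as a boolean mask and that scikit-image provides the morphological dilation; the inner half-width would come from the gradient profiles described above, while the outer aperture width is fixed.

import numpy as np
from skimage.morphology import dilation, disk

def track_photometry(image, track_mask, r_inner, r_outer=10):
    # track_mask: boolean image with the detected track pixels set to True
    # r_inner:    inner aperture half-width from the gradient profiles (px)
    # r_outer:    outer aperture half-width (20 px total width above)
    inner = dilation(track_mask, disk(r_inner))            # stadium-shaped inner aperture
    outer = dilation(track_mask, disk(r_outer))
    ring = outer & ~inner                                  # background annulus
    background = np.median(image[ring])
    return image[inner].sum() - background * inner.sum()  # background-subtracted brightness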
The brightness of all the detected tracks entirely contained in the images is represented by the small coloured dots in Fig. 7. This quantity does not depend on the apparent speed of the agglomerates (except when the apparent speed is small and the track is shorter than 10 pixels, see Section 2.1), but only on their distance to the camera, their size and their scattering properties. The image acquisition method consists of keeping the spacecraft in the same position and rotating it to obtain images at different phase angles. We assume that all observed agglomerates are in the vicinity of the spacecraft and that the population of dust agglomerates generating the tracks for a given set is similar at all phase angles. From this we can estimate the phase function of the agglomerates by performing a statistical analysis of the brightness, assuming that the most representative value for a certain phase angle is the median over all tracks in that image.

Figure 6. Example of the photometry performed on the tracks. The two stadium shapes in the left panel are obtained by dilating the detected track with a disc. The radius of the disc is obtained from the gradient of the image taken along segments perpendicular to the track (right). The blue line in the left panel represents one of the perpendicular segments along which the image gradient is obtained. The black dotted lines show the gradient profiles along all the segments, while the median of all profiles is shown with the red solid line. Using these profiles, the total width of the track (blue solid lines) can be found, and it is used as the width of the inner aperture. The outer aperture has a fixed total width of 20 pixels.

However, the set of track brightnesses is biased in two ways. First, as mentioned before, images taken at high phase angles are contaminated by stray light, which means that the background brightness is much higher than in the images at low phase angles. For this reason, faint tracks cannot be detected in high phase angle images as they blend into the background, so the sample is biased towards brighter tracks, as can be seen in Fig. 7. For the rest of this phase function analysis, tracks obtained from images taken at phase angles greater than 120° are discarded. Secondly, the scattering phase function values are higher at low phase angles, so fainter agglomerates can be detected there. This effect introduces a bias into the phase function derived from the detections. To overcome this issue, we adopt an iterative process. Following the results of Fornasier et al. (2015) and Güttler et al. (2017), we fit the median values of the track brightness with an exponential function of the form R(α) = A exp(−β α). Using this result as a preliminary phase function, we look for the faintest track in the images taken at the highest phase angle and extrapolate its brightness to the remaining images. This extrapolated value is used as a lower threshold for the brightness of the tracks considered in the second iteration step, discarding all fainter tracks. The fitting is then repeated and the coefficients are calculated again for the debiased sample.
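The iterative debiasing can be summarized in a short sketch. The exponential fit is done in log space and the faintest track at the highest phase angle is extrapolated with the preliminary β; the single-iteration structure and the handling of sparsely populated angle bins are simplifications of the procedure described above.

import numpy as np

def exp_fit(alpha, R):
    # least-squares fit of R(alpha) = A * exp(-beta * alpha) in log space
    slope, intercept = np.polyfit(alpha, np.log(R), 1)
    return np.exp(intercept), -slope                     # A, beta

def debiased_phase_function(alpha, R, alpha_cut=120.0):
    # alpha, R: phase angle and integrated reflectance of every track
    keep = alpha <= alpha_cut                            # drop straylight-affected images
    alpha, R = alpha[keep], R[keep]
    angles = np.unique(alpha)
    medians = lambda sel: np.array(
        [np.median(R[sel & (alpha == a)]) for a in angles])

    # preliminary fit to the per-image medians
    A, beta = exp_fit(angles, medians(np.ones(alpha.size, dtype=bool)))

    # extrapolate the faintest track at the highest phase angle to all
    # other angles and discard everything below that threshold
    R_faint = R[alpha == angles.max()].min()
    threshold = R_faint * np.exp(beta * (angles.max() - alpha))
    return exp_fit(angles, medians(R >= threshold))      # debiased A, beta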
The chosen expression for the phase function provides a good fit once the values at phase angles greater than 120° are removed from the sample: the mean value of the coefficient of determination R² over all samples is 0.83. This confirms the results of Fulle et al. (2018), who show that the characteristic U-shaped phase function found for the dusty coma (Bertini et al. 2017) is valid for particles with radius r < 1.25 mm, smaller than the ones observed in the OSIRIS data used in Section 2. However, we find that the mean value for all the sets is β = 8.2 × 10⁻³, around five times smaller than that found by previous works focusing on the comet nucleus. Following the classic theory by Lumme & Bowell (1981a,b), this can be explained by shadowing due to different surface roughness. Even when the nucleus phase function is corrected for self-shadowing, the total brightness depends on the pixel resolution, since unresolved shadows cannot be corrected and affect the final result (see fig. 1 in Hasselmann et al. 2021). Because of this, a lower β value for the coma agglomerates is consistent with the smaller size of the dust agglomerates in the coma compared to that of the nucleus.

While analyzing the time evolution of β, we find that the mean value for all three filters is 9.5 × 10⁻³, 8.8 × 10⁻³ and 6.4 × 10⁻³ for the sets STP063, STP086 and STP092, respectively. Using the same argument as before, this can be explained as a reduction of the median size of the agglomerates in the coma, which is consistent with the increase of the heliocentric distance of the comet.

Figure 7. Phase function of the observed agglomerates. From top to bottom are the results for STP063, STP086 and STP092. The light coloured dots indicate the integrated reflectance of each track, while the coloured triangles show the median for each phase angle. The coloured circles represent the median of the integrated reflectance after filtering out dim tracks.

Figure 8. Mean direction of the tracks in the F24 images for STP092, compared against the mean directions for the simulated trajectories with radiation pressure factors of C = 0, 10 and 100. Results for C = 1 were almost identical to the ones for C = 0, so they are not included in this plot.

5 COMPARISON WITH THE MODEL AND DISCUSSION

5.1 Orientation angle distribution
In order to test whether the measured directions introduced in Section 4.1 can be explained by a projection effect, we create synthetic images corresponding to the STP092 observation geometry, but without taking into account the gaseous drag. Since the results depend on density and size mainly through the gas drag, the choice of these parameters does not affect the results excessively. For this test case, we use agglomerates with ρ_d = 100 kg m⁻³ and r_d = 1 cm which are ejected from the nucleus with a most probable speed of 1 m s⁻¹. In order to check the relevance of the radiation pressure for this effect, we compute the trajectories of agglomerates on initially radial trajectories under the effect of the gravitational and radiation pressure forces, but including a multiplicative factor C for the latter. Fig. 8 displays the same plot as in Fig. 5 for the aforementioned set, but with the mean directions for various C values superimposed. In the purely gravitational case (C = 0), the agglomerates move on radial trajectories, but even so, the deviation can be observed. This can be explained by a simple projection effect: here, the radial direction is defined as the projection onto the image of a vector joining the nucleus and the spacecraft. Since the agglomerates are not at the same position as the spacecraft, the projection of their own (local) radial directions is not necessarily parallel to the one at the spacecraft.
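The purely geometric part of this argument (the C = 0 case) can be reproduced with a small toy calculation: project the velocity of a radially moving grain and the nucleus–spacecraft vector onto the image plane and compare their position angles. The basis construction and the sign and reference conventions below are assumptions made for illustration only.

import numpy as np

def image_plane_basis(boresight):
    # orthonormal basis of the image plane for a given camera boresight
    b = boresight / np.linalg.norm(boresight)
    ref = np.array([0.0, 0.0, 1.0])
    if abs(b @ ref) > 0.9:
        ref = np.array([1.0, 0.0, 0.0])
    e1 = np.cross(b, ref); e1 /= np.linalg.norm(e1)
    e2 = np.cross(b, e1)
    return e1, e2

def projected_angle(vec, e1, e2):
    # position angle of a 3-D direction after projection on the image plane
    return np.degrees(np.arctan2(vec @ e2, vec @ e1))

def radial_deviation(p_grain, p_spacecraft, boresight):
    # deviation between the projected motion of a radially moving grain and
    # the projected nucleus-spacecraft direction (the 'radial' reference)
    e1, e2 = image_plane_basis(boresight)
    los = p_grain - p_spacecraft                      # camera -> grain line of sight
    v = p_grain / np.linalg.norm(p_grain)             # purely radial motion (C = 0)
    v_sky = v - (v @ los) * los / (los @ los)         # remove the line-of-sight component
    dev = projected_angle(v_sky, e1, e2) - projected_angle(p_spacecraft, e1, e2)
    return (dev + 180.0) % 360.0 - 180.0              # wrap to [-180, 180)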
Although the projection effect can explain the trend of the deviation from the radial direction as a function of phase angle in the purely gravitational (𝐶 = 0) case, this effect alone is not sufficient to explain the absolute value of the deviation at phase angles larger than 120◦. Additionally, the observed angular dispersion is larger than the one found with this model at all phase angles. However, a value of 𝐶 ≫ 1 can account for the mean direction as well as for the angular dispersion in the high phase angle region.

From a physical perspective, several processes can be invoked to explain an enhancement of the radiation pressure. For example, an agglomerate mass to cross-section ratio smaller than the one used for the integration, caused either by a lower density or by a nonspherical shape, can explain the stronger radiation pressure effect. Additional forces parallel to the solar direction, such as outgassing from slowly rotating agglomerates (Kelley et al. 2013), can also account for the effect. However, it is worth noting the limitations imposed by the choice of boundary conditions. In order to reduce the time required for the simulations, the integration domain limit is set to an altitude 20 km higher than that of the spacecraft. If some agglomerates ejected from the nucleus decelerate and fall back at altitudes above the domain limit, their trajectories are not taken into account by our model. The inclusion of these agglomerates might modify the results, even for typical values of the radiation pressure force.

This finding represents a nuance with respect to previous results. Della Corte et al. (2016) and Longobardo et al. (2019, 2020) report radial trajectories for particles analyzed by GIADA, but the smaller size of these particles compared to those that OSIRIS is able to observe (see Sec. 5.2) may explain this feature, since they are more affected by the gas drag. In addition, Longobardo et al. (2020) propose that the motion of the particles can only be considered radial up to altitudes of ∼ 40 km, since the radiation pressure plays an important role at higher altitudes.
Likewise, Gerig et al. (2018) find that dust agglomerates observed by OSIRIS follow a free radial outflow from the nucleus at distances larger than 12 km from it, but their analysis is limited to altitudes up to 40 km. On the other hand, Frattin et al. (2021) analyze similar OSIRIS images, with tracks generated by the motion of dust agglomerates. As in our case, they find that most of the agglomerates have trajectories close to the radial direction, and interpret the remaining ones as a population of objects on bound orbits around the nucleus.

In summary, the explanation for the track directions presented in this work proposes that the agglomerate trajectories have a clear general orientation. Like the interpretation proposed in previous works, we find that this general orientation is close to the radial direction once the projection effect is taken into account. However, for phase angles greater than 120◦, neither the most probable orientation angle nor its dispersion can be well reproduced by radial trajectories alone. To explain these trajectories, forces other than the gravity of the nucleus (e.g. radiation pressure) or, following the explanation by Frattin et al. (2021), an increased proportion of agglomerates on bound orbits, must be considered.

5.2 Dust parameter optimization

As explained in Section 3.3, the generation of synthetic images is based on the trajectories computed for individual dust parameter combinations. However, no single parameter combination will fully represent the observations, since the analyzed OSIRIS images contain tracks generated by a variety of different agglomerates. To overcome this issue, we assume that the distribution of track properties obtained from the real images can be expressed as a linear combination of those in the synthetic ones. This implies that interactions between different types of agglomerates, either directly through collisions or indirectly mediated by the gas in the coma, are considered irrelevant for their dynamical evolution.

We focus on comparing the distributions of track properties in the length–orientation angle space. In order to compare these distributions with those obtained from the real images, we generate normalized histograms with equal bin ranges for the tracks detected in both types of images. Mathematically, this is equivalent to creating a 2D matrix in which each element represents the number of tracks with a specific combination of length and orientation angle, normalized by the total number of tracks in the image. Then, for each observation geometry there exist two types of histograms: the one obtained from the real image, x, and those obtained from the synthetic ones, y𝑖, where 𝑖 labels the different dust properties used for the simulations. We generate the master synthetic histogram Z from the linear combination Z = Σ𝑖 𝐾𝑖 y𝑖. The coefficients 𝐾𝑖 of the linear combination, which roughly represent the preponderance of agglomerates with certain properties in the image, are chosen such that they minimize the 𝜒2 distance between the real histogram x and the master synthetic histogram Z. The 𝜒2 distance between two histograms with 𝑁 bins is defined as

𝜒2 = (1/2) Σ_{𝑗=1}^{𝑁} (𝑥𝑗 − 𝑍𝑗)² / (𝑥𝑗 + 𝑍𝑗),    (10)

where 𝑥𝑗 and 𝑍𝑗 represent the value of bin 𝑗 of the real and master synthetic histograms respectively.
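A minimal sketch of this fitting step, assuming the observed and synthetic histograms are already available as flattened, normalized numpy arrays: the master histogram is built as a combination of the synthetic ones with non-negative coefficients that sum to one (the constraints described in the next paragraph), and the 𝐾𝑖 are found by minimizing the 𝜒2 distance of equation (10). The use of scipy.optimize.minimize with the SLSQP method is an illustrative choice and not necessarily the solver used for this work.

```python
import numpy as np
from scipy.optimize import minimize

def chi2_distance(x, Z, eps=1e-12):
    # Equation (10): chi^2 = 1/2 * sum_j (x_j - Z_j)^2 / (x_j + Z_j)
    return 0.5 * np.sum((x - Z) ** 2 / (x + Z + eps))

def fit_coefficients(x, ys, rng=np.random.default_rng(0)):
    """Find K_i >= 0 with sum(K_i) = 1 minimizing chi2(x, sum_i K_i y_i).
    x  : flattened observed histogram (normalized)
    ys : (n_sim, n_bins) array of flattened synthetic histograms
    """
    n = ys.shape[0]
    k0 = rng.random(n)
    k0 /= k0.sum()                              # random normalized starting point

    objective = lambda k: chi2_distance(x, k @ ys)
    constraints = ({'type': 'eq', 'fun': lambda k: k.sum() - 1.0},)
    bounds = [(0.0, 1.0)] * n

    res = minimize(objective, k0, method='SLSQP',
                   bounds=bounds, constraints=constraints)
    return res.x, res.fun

# Toy example: 3 synthetic parameter combinations, 50 bins
rng = np.random.default_rng(1)
ys = rng.random((3, 50)); ys /= ys.sum(axis=1, keepdims=True)
x = 0.6 * ys[0] + 0.3 * ys[1] + 0.1 * ys[2]     # "observed" mixture
K, dist = fit_coefficients(x, ys)
print(K.round(2), f"chi2 = {dist:.2e}")
```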
The 𝐾𝑖 coefficients giving the best fit are found using an iterative linear least-squares solver with random initial values, subject to the constraints 𝐾𝑖 > 0 and Σ𝑖 𝐾𝑖 = 1. For simplicity, we computed the 𝜒2 distance for all the individual histograms y𝑖 and calculated the master synthetic histogram Z using only the subset of the 100 synthetic images with the best individual results. In order to check whether this subset choice affects the results, we repeated the fit with different numbers of synthetic images. We found that performing the fit with these 100 synthetic images provided the best compromise between the accuracy of the results, as measured by the residual after the fit, and the execution time. An example of the best-fitting histograms obtained using this method can be seen in Fig. 9.

Figure 9. Observed and fitted distributions of the tracks in the orientation angle versus track length space for the set taken at STP092 with a phase angle of 𝛼 = 30◦. The rows correspond to the filter used for the image acquisition (from top to bottom: Blue, Orange and Red), while the left and right columns show the observed and fitted distributions respectively. The color code represents the number of tracks found in each bin, normalized by the total number of tracks in the image.

We use these 𝐾𝑖 values to obtain a weighted distribution of the parameters used in the simulations. Fig. 10 shows an example of this weighted distribution for an image from the set STP092 taken at a phase angle 𝛼 = 120◦ with the Blue filter. This distribution is obtained by grouping the synthetic images by their value of a given parameter (in the case of Fig. 10: density, agglomerate radius, initial speed and initial direction) and summing the 𝐾𝑖 values within each group. We found that varying the number of synthetic images used for the fit does not significantly change these results.

The weighted means can be obtained from these distributions. Fig. 11 shows the dependence of these means on the phase angle at which the images were taken. We plot four different parameters: agglomerate density, radius, most probable initial speed and mass-over-area ratio. The mass-over-area ratio 𝑀/𝐴 = (4/3 𝜋𝜌𝑟𝑑³)/(𝜋𝑟𝑑²) = (4/3) 𝜌𝑟𝑑 is useful for quantifying the effect of the radiation pressure and of the gas drag on the agglomerate dynamics, since both the 𝐹𝐺/𝐹𝑅 and 𝐹𝐺/𝐹𝐷 ratios depend linearly on it. This model is not able to provide tight constraints on the density, and hence neither on the 𝑀/𝐴 ratio, but a clear trend can be seen for the remaining parameters. Lastly, as can be seen in the bottom panel of Figure 10, the results are independent of the choice of the angle between the initial velocity and the local normal, 𝜃𝑑, so they are not shown here. This is because, while the initial velocity of the dust agglomerates is not radial, the initial velocity of the gas is, so the gas drag force, which is very strong due to the high gas density in the vicinity of the nucleus, cancels out the possible effect of this non-radial initial velocity.
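The weighted distributions and weighted means described above amount to simple bookkeeping on the fitted 𝐾𝑖. A minimal sketch follows, assuming each synthetic image carries a record of the dust parameters used to generate it; the field names and numerical values are illustrative only.

```python
from collections import defaultdict

# Illustrative records: one entry per synthetic image, with its fitted K_i
sims = [
    {"rho": 100.0, "r_d": 0.05, "v_p": 1.0, "K": 0.40},
    {"rho": 100.0, "r_d": 0.10, "v_p": 1.0, "K": 0.35},
    {"rho": 500.0, "r_d": 0.10, "v_p": 2.0, "K": 0.25},
]

def weighted_distribution(sims, param):
    """Sum the K_i of all synthetic images sharing the same value of `param`."""
    dist = defaultdict(float)
    for s in sims:
        dist[s[param]] += s["K"]
    return dict(dist)

def weighted_mean(sims, param):
    """K-weighted mean of a dust parameter (the K_i are normalized to 1)."""
    return sum(s[param] * s["K"] for s in sims)

for p in ("rho", "r_d", "v_p"):
    print(p, weighted_distribution(sims, p), f"mean = {weighted_mean(sims, p):.3g}")

# Mass-over-area ratio M/A = (4/3) * rho * r_d built from the weighted means
rho_m, rd_m = weighted_mean(sims, "rho"), weighted_mean(sims, "r_d")
print("M/A =", 4.0 / 3.0 * rho_m * rd_m, "kg m^-2")
```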
As can be seen in Figure 11, the results for the images obtained using the Blue and Red filters match each other, while the ones for the Orange filter show a larger deviation. As mentioned in Section 2.1, the exposure times used for this type of image introduce a bias towards agglomerates that are faster and/or closer to the camera, which may be responsible for the observed discrepancies.

It can be noted that the mean chunk sizes follow the expected trend: they are larger for the sets taken closer to perihelion, and they are larger for increasing phase angles, that is, looking into the dayside of the coma. In order to increase the sample size, the combined results for the chunk size as a function of phase angle obtained from the fits for the Blue and Red filters are shown in Figure 12. However, the sizes are much larger than the theoretical maximum liftable size found using the model by Fulle et al. (2020). In contrast, Gundlach et al. (2020) and Ciarniello et al. (2022) show that CO2 ice sublimation is the main driver of the activity of chunks with sizes ≳ 10 cm. This is because the water sublimation front is located at shallower depths below the surface, so it can only build up enough pressure to overcome the internal strength of the material and eject chunks from these shallow depths. The CO2 sublimation front, on the other hand, is located deeper, allowing larger chunks to be ejected. It is important to note that the studied case assumes that the agglomerates are ejected when the gas pressure overcomes the material tensile strength. Therefore this model is not able to treat the case where a detached agglomerate resting on top of the surface gains an initial impulse and is lifted.

Figure 10. Weighted distribution of the parameters found for the image taken at 𝛼 = 120◦ with the Blue filter in the set STP092.

The scenario where the chunks are ejected via CO2 sublimation is consistent with the nonzero mean initial speed of the agglomerates found in our simulations. Since CO2 sublimation is not included in our model, it can be represented as an initial kick to the chunks, making it possible for them to be lifted. Once in the coma, the agglomerates evolve dynamically under the influence of the forces mentioned above, in particular gas drag. However, since the CO2 production rate is around one order of magnitude lower than that of H2O, the latter controls the drag force, and the assumption of a coma composed of water vapour remains valid.

Regarding the initial speed values, we observe that the initial velocities derived from the histogram fitting are larger for the set from STP063, that is, the one closest to perihelion. Moreover, in the purely gravitational case (i.e. without radiation pressure or gas drag), the initial speed required to reach the altitudes at which the spacecraft was located is ≃ 0.80 m s−1 for all three analyzed sets. For all three data sets, the initial velocities found via the histogram fitting (Fig. 11) are sufficient for the chunks to reach the spacecraft altitude. Hence, the mere presence of these agglomerates in the FOV does not imply that they have been significantly accelerated by gas drag after leaving the nucleus surface. Due to their large size, it is unclear whether gas drag has any relevance for the dynamical evolution of these chunks.

We use an indirect approach to estimate the effect of gas drag on the observed chunks, based on the fraction of them having bound orbits and on the distribution of initial velocities of the agglomerates inside the FOV.
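The first ingredient of this estimate is the probability that an initial speed drawn from the Maxwell–Boltzmann distribution is large enough to reach the field of view yet smaller than the escape speed. A back-of-the-envelope sketch is given below, assuming that equation 7 is the standard Maxwell–Boltzmann speed distribution written in terms of the most probable speed; the limiting speeds are the values quoted in the following paragraph, and the most probable speed of 1 m s−1 is illustrative.

```python
import numpy as np
from scipy.integrate import quad

def maxwell_speed_pdf(v, v_p):
    # Maxwell-Boltzmann speed distribution expressed through the most
    # probable speed v_p (assumed to be the form of equation 7)
    return (4.0 / np.sqrt(np.pi)) * v**2 / v_p**3 * np.exp(-(v / v_p) ** 2)

v_p = 1.0      # m/s, most probable initial speed (illustrative)
v_fov = 0.80   # m/s, speed needed to reach the spacecraft altitude
v_esc = 0.82   # m/s, escape speed of the spherical nucleus

# Fraction of agglomerates that can reach the FOV but remain gravitationally bound
p_window, _ = quad(maxwell_speed_pdf, v_fov, v_esc, args=(v_p,))
print(f"P(v_fov < v < v_esc) = {100 * p_window:.2f} per cent")
```

With these inputs the speed window contains only of order one per cent of the distribution, in line with the estimate quoted below.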
If we assume that gas drag does not influence the dynamics of the chunks, the initial speed needed to reach the FOV (0.80 m s−1) is close to the escape speed of the spherical nucleus (0.82 m s−1). Since the initial velocities are drawn from the Maxwell–Boltzmann distribution given in equation 7, we calculate the probability that an agglomerate has an initial speed sufficient to reach the FOV but still smaller than the escape speed. We find that this condition is fulfilled by only a small proportion (<1 per cent) of the agglomerates in this purely gravitational case.

However, if we analyze the energy of the agglomerates that intersect the FOV in our simulations (which include the effect of gas drag), we find a much higher proportion of bound orbits. This number is highly variable between data sets, ranging from 0 to 30 per cent, with a mean of 12 per cent. We interpret this finding such that the majority of the bound chunks were initially lifted with speeds too small to reach the FOV, but were subsequently accelerated across the FOV by gas drag. This interpretation is supported by the fact that 32 per cent of the chunks that reach the FOV have initial speeds lower than needed to reach the spacecraft altitude. All this implies that the dynamics of the large chunks found in our simulations is still affected by gas drag.

Figure 11. Weighted means of the dust parameters. The four selected parameters, density, radius, initial speed and 𝑀/𝐴 ratio, are displayed in columns from left to right, while the results for STP063, STP086 and STP092 are shown in the top, middle and bottom rows respectively. Line colors indicate the filter used, as in Fig. 5.

Figure 12. Weighted mean chunk size obtained from the combination of the Blue and Red filters as a function of the phase angle. It can be seen that the chunk size increases with decreasing heliocentric distance.

It is important to note that the chunk sizes found in this work are larger than the ones found by Frattin et al. (2021) for the same type of images. Agglomerate sizes compatible with Frattin et al. (2021) would acquire velocities in our model that are too high to be compatible with the observed track lengths. Fig. 13 shows the median track lengths found in the synthetic images as a function of the agglomerate size, where it can be seen that smaller particles produce longer tracks. The reason for this behaviour is that, after acceleration by gas drag, the ratio 𝐹𝐺/𝐹𝐷 ∝ 𝑟𝑑 for a fixed particle density. This implies that smaller particles are more susceptible to the action of the gas drag and can acquire higher velocities, generating longer tracks. For agglomerate sizes compatible with Frattin et al. (2021), the tracks in the synthetic images are longer than those in our OSIRIS images, so our data can only be reproduced by larger chunks. This effect is particularly noticeable for STP063, where the gas flow was so intense that additional simulations with larger chunks had to be carried out (see Table 2).
However, there may exist other effects that slow down the agglomerates. For example, solar gravity, which was not taken into account in the dynamical simulations, could provide an alternative way of producing shorter tracks. Since the simulations were carried out in the non-inertial nucleocentric reference frame, the net force acting on the agglomerates in this frame is the difference between the solar gravity acting on the agglomerate and that acting on the nucleus, i.e. the tidal force. This force increases linearly with the nucleocentric distance and, due to the observation geometry (the nucleus–spacecraft vector forming ≃ 90◦ with the heliocentric radial direction), its direction at the position of the spacecraft is radial, pointing towards the nucleus, effectively increasing the nucleus gravity acceleration and slowing the observed dust agglomerates down even further. Assuming that all the agglomerates present in the images are at the same height above the nucleus as the spacecraft, and using the approximate expression for the tidal acceleration 𝑎𝑇 = G𝑀⊙ 𝑟/𝑟ℎ³, the ratio between the tidal and nucleus gravity forces, 𝐹𝑇/𝐹𝐺, is equal to 0.19, 0.02 and 5.7 × 10−3 for the sets STP063, STP086 and STP092 respectively. Tidal effects may therefore play a relevant role in the dynamical evolution of the dust agglomerates, mainly for the first set.

Figure 13. Median track length found in synthetic images for different values of the particle radius. Each black dot represents the median length of all the tracks found in one synthetic image. Red symbols show the median value of all images grouped by the particle size used for the simulation. A clear trend can be seen where larger agglomerates generate shorter tracks.

6 CONCLUSIONS

We developed a semi-automatic method to detect tracks generated by agglomerates moving in front of the OSIRIS camera onboard Rosetta. This method exploits the fact that the moving agglomerates generate a characteristic track pattern during the exposure. We applied this method to three different image sets taken with the NAC camera, comprising a total of 105 images, and detected 20033 tracks.

We analyzed the photometric data obtained from those tracks, and found that the agglomerates' phase functions do not show the characteristic U-shape found for the phase function of the coma (Bertini et al. 2017), but rather follow the same exponential trend as the one shown by the nucleus (Fornasier et al. 2015; Güttler et al. 2017). Following Fulle et al. (2018), this establishes a lower limit for the agglomerate sizes of 𝑟 > 1.25 mm. The value of the phase function exponent 𝛽 found for the agglomerates is smaller than that of the nucleus, consistent with the difference in roughness scales between the two samples. We also observed that the 𝛽 value decreases with increasing heliocentric distance, indicating a decrease in the median size of the agglomerates detected in the coma.

We used a simplified dynamical model to create synthetic images that reproduce the observations.
We solved the inverse problem to find the values characterizing the dust that best reproduce the observed tracks. Using this method we could impose only a loose constraint on the density (𝜌 = 200 – 800 kg m−3), but tighter ones on the initial velocities (𝑣𝑃 ≃ 1 m s−1) and chunk radii (several dm). Both the initial velocities and the chunk radii vary with heliocentric distance, consistent with the gas production rate and with the observed phase function.

Even though the radii obtained from the comparison between the observations and the dynamical model only provide an upper limit, the activity model used here cannot provide the pressure required to lift agglomerates of such sizes. Instead, it is necessary to invoke other sources of gas, such as CO2 (Gundlach et al. 2020), in order to explain the ejection of those chunks.

We also showed that other dynamical effects, such as solar gravity, may play an important role in determining the dynamics of the agglomerates, principally for the sets taken closer to perihelion, where the combination of the small heliocentric distance with the high spacecraft altitude makes the agglomerates seen by the spacecraft much more susceptible to its effect. In order to better model the dynamics of the agglomerates, this effect must be taken into account in future works.

ACKNOWLEDGEMENTS

We thank the referee for his constructive suggestions that significantly helped to improve the quality of this manuscript. We thank Nick Attree, Yuna Kwon, Manuela Lippi, Johannes Markkanen, Raphael Marschall and Marius Pfeifer for our fruitful discussions. OSIRIS was built by a consortium of the Max-Planck-Institut für Sonnensystemforschung, Göttingen, Germany; the CISAS University of Padova, Italy; the Laboratoire d'Astrophysique de Marseille, France; the Instituto de Astrofísica de Andalucia, CSIC, Granada, Spain; the Research and Scientific Support Department of the European Space Agency, Noordwijk, The Netherlands; the Instituto Nacional de Técnica Aeroespacial, Madrid, Spain; the Universidad Politécnica de Madrid, Spain; the Department of Physics and Astronomy of Uppsala University, Sweden; and the Institut für Datentechnik und Kommunikationsnetze der Technischen Universität Braunschweig, Germany. The support of the national funding agencies of Germany (DLR), France (CNES), Italy (ASI), Spain (MEC), Sweden (SNSB), and the ESA Technical Directorate is gratefully acknowledged. We thank the Rosetta Science Ground Segment at ESAC, the Rosetta Mission Operations Centre at ESOC and the Rosetta Project at ESTEC for their outstanding work enabling the science return of the Rosetta Mission. This work used the Scientific Compute Cluster at GWDG, the joint data center of the Max Planck Society for the Advancement of Science (MPG) and the University of Göttingen. The authors acknowledge funding by the ERC Starting Grant No. 757390 Comet and Asteroid Re-Shaping through Activity (CAstRA). PL conducted the work in this paper in the framework of the International Max-Planck Research School (IMPRS) for Solar System Science at the University of Göttingen. JA acknowledges funding by the Volkswagen Foundation.

DATA AVAILABILITY

The data underlying this article are available at the Planetary Science Archive of the European Space Agency under https://www.cosmos.esa.int/web/psa/rosetta
REFERENCES

Agarwal J., et al., 2016, MNRAS, 462, S78
Bertini I., et al., 2017, MNRAS, 469, S404
Bird G. A., 1994, Molecular Gas Dynamics And The Direct Simulation Of Gas Flows
Ciarniello M., et al., 2022, Nature Astronomy, 6, 546
Della Corte V., et al., 2016, MNRAS, 462, S210
Della Corte V., et al., 2019, A&A, 630, A25
Drolshagen E., et al., 2017, Planet. Space Sci., 143, 256
Duda R. O., Hart P. E., 1972, Commun. ACM, 15, 11–15
Fornasier S., et al., 2015, A&A, 583, A30
Frattin E., et al., 2017, MNRAS, 469, S195
Frattin E., et al., 2021, MNRAS, 504, 4687
Fulle M., et al., 2016, ApJ, 821, 19
Fulle M., et al., 2018, MNRAS, 476, 2835
Fulle M., Blum J., Rotundi A., Gundlach B., Güttler C., Zakharov V., 2020, MNRAS, 493, 4039
Gerig S. B., et al., 2018, Icarus, 311, 1
Gundlach B., Fulle M., Blum J., 2020, MNRAS, 493, 3690
Güttler C., et al., 2017, MNRAS, 469, S312
Hasselmann P. H., et al., 2021, Icarus, 357, 114106
Hough P. V., 1962, Method and means for recognizing complex patterns
Keller H. U., et al., 2007, Space Sci. Rev., 128, 433
Kelley M. S., Lindler D. J., Bodewits D., A’Hearn M. F., Lisse C. M., Kolokolova L., Kissel J., Hermalyn B., 2013, Icarus, 222, 634
Kwon Y. G., Bagnulo S., Markkanen J., Agarwal J., Kolokolova L., Levasseur-Regourd A.-C., Snodgrass C., Tozzi G. P., 2022, A&A, 657, A40
Longobardo A., et al., 2019, MNRAS, 483, 2165
Longobardo A., et al., 2020, MNRAS, 496, 125
Longobardo A., et al., 2022, MNRAS, 516, 5611
Lumme K., Bowell E., 1981a, AJ, 86, 1694
Lumme K., Bowell E., 1981b, AJ, 86, 1705
Mannel T., et al., 2019, A&A, 630, A26
Merouane S., et al., 2017, MNRAS, 469, S459
Mignone A., Bodo G., Massaglia S., Matsakos T., Tesileanu O., Zanni C., Ferrari A., 2007, ApJS, 170, 228
Ott T., et al., 2017, MNRAS, 469, S276
Pfeifer M., Agarwal J., Schröter M., 2022, A&A, 659, A171
Rotundi A., et al., 2015, Science, 347, aaa3905
Tubiana C., et al., 2015, A&A, 583, A46
Zakharov V. V., Ivanovski S. L., Crifo J. F., Della Corte V., Rotundi A., Fulle M., 2018, Icarus, 312, 121
Zakharov V. V., Rodionov A. V., Fulle M., Ivanovski S. L., Bykov N. Y., Della Corte V., Rotundi A., 2021, Icarus, 354, 114091

This paper has been typeset from a TEX/LATEX file prepared by the author.

diff --git a/EdE4T4oBgHgl3EQfGgyE/content/tmp_files/load_file.txt b/EdE4T4oBgHgl3EQfGgyE/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..179742f04e9b020391f7d93d66dca6100a6faf7a
--- /dev/null
+++ b/EdE4T4oBgHgl3EQfGgyE/content/tmp_files/load_file.txt
@@ -0,0 +1,782 @@
+filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf,len=781
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2Max Planck Institute for Solar System Research, Justus-von-Liebig-Weg 3, Göttingen 37077, Germany.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 3Max Planck Institute for Dynamics and Self-Organization, Am Faßberg 17, D-37077 Göttingen, Germany.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Accepted XXX.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Received YYY;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' in original form ZZZ ABSTRACT We present a method to analyze images of the coma of 67P/Churyumov–Gerasimenko obtained using OSIRIS, the main imaging system onboard Rosetta, where dust aggregates can be seen as bright tracks because of their relative velocity with respect to the spacecraft.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' We applied this method to 105 images taken in 2015 July, 2015 December and 2016 January, identifying more than 20000 individual objects.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' We performed a photometric analysis of them, finding their phase function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' This phase function follows the same trend as the one found for the nucleus, consistent with the detected particles having a size larger than ∼ 1 mm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Additionally, the phase function becomes shallower for increasing heliocentric distances, indicating a decrease in the mean agglomerate size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' In order to characterize the agglomerates observed in the image, we developed a simplified model for their ejection and dynamics in the coma, and generated synthetic images based on it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' We solved the inverse problem by finding the simulation parameters that give the best fit between synthetic and real images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' In doing so, we were able to obtain a mean agglomerate size ∼ dm and initial speed ≃ 1 m s-1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Both show a decrease with increasing heliocentric distance, sign of the reduction in activity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Also, the sizes obtained by the comparison are not compatible with ejection caused by water activity, so other sources have to be invoked, mainly CO2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Key words: methods: data analysis – methods: numerical – comets: individual: 67P/Churyumov–Gerasimenko.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 1 INTRODUCTION The Rosetta mission provided data with unprecedented detail on comet 67P/Churyumov–Gerasimenko (hereafter 67P) by sampling its environment in situ during a period of around two years.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' In particular, cometary dust particles over a wide range of sizes were collected, analyzed and characterized by MIDAS (for particles in the range 𝜇m to tens of 𝜇m, Mannel et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Longobardo et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2022), COSIMA (tens of 𝜇m – hundreds of 𝜇m, Merouane et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2017) and GIADA (hundreds of nm – tens of mm, Della Corte et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Longobardo et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2022) instruments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Larger objects (≳ 1 cm) could be detected by the main imaging system onboard Rosetta, the Optical, Spectroscopic and Infrared Remote Imaging System (OSIRIS, Keller et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2007).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The data obtained by OSIRIS make it possible to obtain information about the morphological and dynamical properties of the dust, and in case the same object could be identified in more than one image while using different filters, also about its color, which can give hints about its composition (Frattin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Kwon et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2022).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' However, remotely analyzing individual dust particles or aggre- gates in the coma must face a fundamental issue: the distance from the sensor to the object is unknown, so its size and velocity, and hence size and mass distribution, cannot be uniquely determined.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Several works deal with this issue in different ways: Rotundi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (2015) and Fulle et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (2016) assume that the motion of the objects ★ E-mail: j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='lemos-velazquez@tu-braunschweig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='de is entirely radial from the nucleus, and that the apparent motion with respect to the camera comes mainly from the spacecraft velocity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Using those assumptions, the distance can be determined using the parallax effect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Agarwal et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (2016) and Pfeifer et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (2022) use images where the nucleus limb is present, and focus on agglomerates going away from it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' These agglomerates have a higher probability of being recently ejected, so it can be assumed that they are at the same distance as the nucleus;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Drolshagen et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (2017) and Ott et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (2017) exploit the fact that the two detectors of OSIRIS, the Narrow (NAC) and Wide (WAC) Angle Cameras, are separated by ≃ 70 cm on the spacecraft, so if both cameras detect the same object, the parallax effect can be used to measure its distance to the camera;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' in Güttler et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (2017) it is noted that objects closer than ∼ 100 m appear unfocused in WAC images, so the authors develop a method to measure the distance to objects close to the camera by measuring the apparent size of the unfocused pattern, which is directly related to its distance;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' finally, Frattin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (2021) uses a mixed approach, constraining the sizes and distances of the dust agglomerates based on speed distributions taken from some of the works listed before, in combination with photometric simulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The approach of this work is different from that of its predecessors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Instead of looking for an alternative method for determining the dis- tance, we propose to bypass this requirement by using a combination of observations and statistical modelling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' On the one hand, images taken by OSIRIS are analyzed in order to obtain a set of observables from the distribution of dust agglomerates present in each of them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' © 2022 The Authors arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='04895v1 [astro-ph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='EP] 12 Jan 2023 2 P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Lemos On the other hand, we simulate the trajectories of dust agglomerates through the coma using a simplified ejection and dynamical model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' These trajectories are characterized by different dust parameters, such as size, density and initial velocity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Based on these simulations and the spacecraft position and orientation, a group of synthetic images of the agglomerates as seen by OSIRIS are generated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Using these synthetic images, the inverse problem is solved by optimizing the pa- rameter choice for the dynamical simulations in order to reproduce properties of the dust agglomerate trajectories observed in the real images.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' This approach is also different from that applied in previ- ous works in that the properties of the entire population of detected objects are analyzed statistically, rather than dealing with individual objects as done previously.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The work is organized as follows: in Section 2 the datasets and the dust agglomerate detection method are described.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The dynamical model and the synthetic image generation are explained in Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' In Section 4 we present the analysis of the properties of dust agglomerates found in OSIRIS images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' In Section 5 the synthetic images are compared with the real ones.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Finally, we present our conclusions in Section 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2 OBSERVATIONS AND TRACKS DETECTION Rosetta escorted 67P from 2014 August when its heliocentric dis- tance was ≃ 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='7 au inbound, to 2016 September when it was out- bound at ≃ 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='8 au from the Sun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' For this work we will focus on three different image sets obtained with the OSIRIS NAC around perihelion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' All these image sequences were obtained under the oper- ational activity DUST_PHASE_FUNCTION, originally devoted to the analysis of the dusty coma brightness as a function of the phase angle (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' the angle between the Sun-spacecraft and camera pointing direction) and the wavelength.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' In order to achieve this, the observ- ing conditions were such that the distance from the nucleus to the spacecraft remained nearly constant throughout the duration of the acquisition, while the camera pointing scanned the coma at different phase angles.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The plane of observation was nearly perpendicular to that containing the Sun, the nucleus, and the spacecraft.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' A sketch of the observation geometry can be seen in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The image sets used in this work were acquired on 2015 July 7, 2015 December 14 and 2016 January 21, at heliocentric distances of 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='32 au inbound, 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='89 au outbound and 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='18 au outbound respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' All three sets were taken using the Blue F24 (peak transmission at 480.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='7 nm), Orange F22 (649.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='2 nm) and Red F28 (743.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='7 nm) filters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' A binning of 4 × 4 was used for all images, so the final image size is 512 × 512 pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' A summary of the observing conditions can be found in Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Depending on the heliocentric distance and the filter used, the images were obtained using exposure times ranging from 7 to 146 seconds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' These exposure times combined with the nonzero relative velocity between the spacecraft and the dust agglomerates result in them appearing in the images not as point sources, but instead as elongated tracks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' This fact will be exploited later, at the moment of the object detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' A total of 105 level 3F images were used for this work.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' These images are radiometrically calibrated, corrected for geometric distor- tion and for solar and in-field stray light, and expressed in reflectance units, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' the corrected flux is normalized by the solar flux at the corresponding heliocentric distance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' A detailed description of the data processing steps can be found in Tubiana et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Despite being corrected for stray light effects, some of the high phase an- gle images present illumination artefacts that complicate the track Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Sketch of the observation geometry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The solar (blue dashed) and spacecraft (red dotted) directions with origin in the nucleus form a perpen- dicular angle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The pointing of the camera, shown as violet solid lines, scan the coma for different phase angles in the plane roughly perpendicular to that of the spacecraft, nucleus and Sun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Note that the image is not to scale.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' This problem is more evident in images taken at phase angles greater than 100◦, that is, when the camera pointing is closer to the Sun direction, so the results in this range should be treated with caution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='1 Detection method A semi-automatic detection method based on the one presented in Frattin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (2017) was used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The steps involved in this method are: A similarity map 𝑆𝑀𝜃 is created using a track template 𝑇𝜃.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' These templates consist of a square window of 10 pixels in length, where a straight line representing the track passes through the centre of the template.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The orientation angle 𝜃, defined as 𝜃 = arctan(−1/𝑚)1, where 𝑚 is the slope of the line, successively takes all the values in the [−90◦, +89◦] range, with steps of 4◦.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 𝑆𝑀𝜃 is calculated as the normalized cross correlation (NCC) between each image 𝐼 and the template 𝑇𝜃, and applying the convolution of the result with the same template 𝑆𝑀𝜃 = (𝐼 ¯⊗𝑇𝜃) ⊗ 𝑇𝜃.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' (1) Binary images are generated from the similarity maps for each orientation by imposing a lower threshold defined as 𝐽 + 2𝑆, where 𝐽 and 𝑆 represent the local median and standard deviation of the 1 We use this definition for the orientation angle in order to match the one used in the Hough transform later in the algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' MNRAS 000, 1–12 (2022) Z Y XDynamics of dust in the coma of 67P 3 Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Image sets used for this work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Columns represent the mid and short-term planning cycles (intervals of roughly one month and one week), date of acquisition, filters and exposure times used, heliocentric distances, nucleocentric distances and number of images in the set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Planning cycle (MTP/STP) Date F (𝑡𝑒𝑥 𝑝) 𝑟ℎ (au) 𝑟𝑆/𝐶 (km) #𝐼 018/063 2015-07-07 F22 (7 s), F24 (73 s), F28 (40 s) 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='32 153.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='4 45 023/086 2015-12-14 F24 (73 s), F22 (7 s), F28 (40 s) 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='89 102.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='6 21 025/092 2016-01-21 F24 (146 s), F22 (14 s), F28 (80 s) 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='18 79.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='2 39 NCC respectively.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Nonzero pixels in these binary images represent locations in the image with high probability of having a track with a determined orientation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Tracks are detected from each binary image using a Hough transform method (Hough 1962, Duda & Hart 1972).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The outcome of this step is called nominal track.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' To characterize the nominal tracks, segments perpendicular to the track are analyzed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The centres of the segments are equally spaced on the track, with a distance of 1/3 pixel between them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Brightness profiles are then generated by interpolating the image values over the segment positions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' For each profile, two parameters are defined: its brightness peak value, and the residual distance to the nominal track, defined as the distance in pixels from the nominal track to the peak position along the mentioned segment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Once these parameter pairs are defined for all segments, the track is characterized by a boundary region, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' a region in the brightness–residual space enclosed by the convex hull of all the pairs, extended by the standard deviation along each axis (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' The nominal tracks are corrected for incomplete detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' First, the nominal track is preliminarily extended by 5 pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Then, the brightness–residual pairs are defined for the extended part, and com- pared with the boundary region defined in the previous step.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' If the points corresponding to the extended part lie inside the boundary region, the line is extended.' 
The process is repeated until the added points no longer belong to the region or the image edge is reached. An example of this process is shown in Fig. 2.

The extended tracks are then searched for duplicate detections by comparing the pixels spanned by the tracks: if two extended tracks share more than 70% of their pixels, they are merged. Finally, a manual inspection and correction of the results is performed.

Using this method, a total of 20033 tracks were detected. This number is larger by at least an order of magnitude than in previous studies focused on detecting and analyzing this type of track in similar images. It is worth noting that, since the track templates 𝑇_𝜃 have a size of 10 pixels, our algorithm is unable to find tracks shorter than that length. A smaller template size would have allowed shorter tracks to be detected, but would also increase the chance of mistakenly identifying a group of bright background pixels as a real track. In order to check whether this choice of template size introduces a bias in the detected tracks, we estimated the properties a dust agglomerate must have to generate a track of this length. The projected track length in pixels 𝑙_pix depends on the agglomerate projected speed 𝑣, its distance to the camera 𝑑 and the image exposure time 𝑡_exp:

𝑙_pix = (𝑡_exp / (𝑑 𝑅_NAC)) 𝑣,    (2)

where 𝑅_NAC is the angular resolution of the camera.
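Equation (2) can be inverted to give the slowest projected speed that still produces a detectable (10-pixel) track. A small helper under these definitions, with purely illustrative input values:

def track_length_pix(v, d, t_exp, R_NAC):
    """Eq. (2): projected track length in pixels for projected speed v (m/s),
    camera distance d (m), exposure time t_exp (s) and pixel scale R_NAC (rad/pixel)."""
    return t_exp * v / (d * R_NAC)

def v_min_for_length(l_pix, d, t_exp, R_NAC):
    """Minimum projected speed (m/s) producing a track of l_pix pixels."""
    return l_pix * d * R_NAC / t_exp

# Illustrative call only; the pixel scale is an assumed value, not one quoted in the text.
v_min = v_min_for_length(l_pix=10, d=18e3, t_exp=7.0, R_NAC=18.6e-6)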
For image sets similar to the ones used here, Frattin et al. (2021) estimated a maximum agglomerate-to-camera distance of 18 km. At that distance, the minimum agglomerate projected speed needed to generate a track longer than 10 pixels is 0.4 m s⁻¹ for images taken with the Blue and Red filters, but 1 m s⁻¹ for those taken with the Orange filter. Since Ott et al. (2017) found that the median apparent speed of this type of agglomerate is 0.6 m s⁻¹, we can conclude that the tracks detected in the images taken with the Orange filter sample a population of agglomerates that has a higher relative speed with respect to the spacecraft, is closer to the camera, or a combination of both.

Figure 2. Example of the track correction and extension method. Top: the orange points represent the positions of the peak values from the segments perpendicular to the track, which was obtained from the Hough transform. The detected track fails to cover the entire length on the left side (the gaps in between are caused by overlapping background stars). Using these positions and brightness values, the boundary region is defined in the bottom panel. The same procedure is applied to the profiles in the extended region. The violet symbols from the extended track to the left lie inside this boundary region, so the line is extended. On the contrary, the green points on the right side do not.
3 IMAGE MODELLING

3.1 Dust dynamics model

Synthetic images were generated by modelling the trajectories of different types of dust agglomerates in the comet coma and looking for intersections with the camera FOV. A simplified model for computing the dust agglomerate trajectories was developed. This model is initially developed in 2D; assuming that the activity is axially symmetric with respect to the solar direction, the 3D trajectories are obtained by rotating the 2D ones around the solar direction by a random angle.

In this model, the nucleus is represented by a sphere of radius 𝑅_N = 2000 m and mass 𝑀_N = 9.982 × 10¹² kg. The dust agglomerates are assumed to be spherical with radius 𝑟_d and density 𝜌_d, and are under the influence of three forces: nucleus gravity 𝐹_G, radiation pressure 𝐹_R and gas drag 𝐹_D, expressed as

F_G = − (G 𝑀_N 𝑚 / 𝑟²) (r/𝑟),    (3)
F_R = − (𝑐_⊙ 𝑄_RP π 𝑟_d² / (𝑟_h² 𝑐)) (r_h/𝑟_h),    (4)
F_D = (|v_g − v_d|² / 2) 𝜌_g π 𝑟_d² 𝐶_D (V/𝑉),    (5)

where 𝑚 = (4/3) π 𝜌_d 𝑟_d³ is the agglomerate mass, r is its position with respect to the nucleus, V = v_g − v_d is the relative velocity between the gas and the agglomerate, 𝑐_⊙ = 1361 W m⁻² is the solar constant, 𝑄_RP is the scattering efficiency for radiation pressure (assumed to be equal to 1), 𝑟_h is the heliocentric distance expressed in au,
𝑐 is the speed of light, v_g and 𝜌_g are the gas velocity and density respectively, and 𝐶_D is the drag coefficient, calculated using the free-molecular expression (Bird 1994)

𝐶_D = (2𝑠² + 1)/(𝑠³√π) · exp(−𝑠²) + (4𝑠⁴ + 4𝑠² − 1)/(2𝑠⁴) · erf(𝑠) + (2√π)/(3𝑠) · √(𝑇_d/𝑇_g),    (6)

where 𝑠 = 𝑉 / √(2 𝑇_g 𝑘_B / 𝑚_g), and the dust temperature 𝑇_d is assumed to be equal to the gas temperature 𝑇_g. Computing the gas drag force requires a description of the density and velocity of the gas in the coma, for which an intermediate step needs to be included (see Section 3.2).

The initial positions of the dust agglomerates are chosen from a probability distribution function that has the same dependence on the subsolar angle as the gas production rate, obtained from the model by Fulle et al. (2020) (see Sec. 3.2). The initial speeds are drawn from a Maxwell–Boltzmann distribution,

𝑓(𝑣) = (4/√π) (𝑣²/𝑣_P³) exp(−𝑣²/𝑣_P²),    (7)

where 𝑣_P is the most probable speed. In order to represent the surface roughness in a simplified way, we include a tangential component in the initial velocity, such that its direction forms an angle 𝜃_i with the local normal. 𝜃_i is chosen from a normal distribution centred on the free parameter 𝜃_d with a standard deviation of 20°, except for the case 𝜃_d = 0°, when all the agglomerates start with radial velocities.
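To make Eqs. (3)–(7) concrete, here is a hedged numerical sketch of the per-unit-mass accelerations and of the initial-speed sampling. The gas state (v_g, 𝜌_g, 𝑇_g, 𝑚_g) and the Sun direction are inputs that the paper obtains from the gas model of Sec. 3.2; the constants and the sign convention for the Sun direction below are standard assumptions rather than values quoted from the text.

import numpy as np
from scipy.special import erf
from scipy.stats import maxwell

G, K_B  = 6.674e-11, 1.381e-23        # SI gravitational and Boltzmann constants
C_SUN   = 1361.0                      # solar constant, W m^-2
C_LIGHT = 2.998e8                     # speed of light, m s^-1
M_N     = 9.982e12                    # nucleus mass, kg

def drag_coefficient(V, T_g, T_d, m_g):
    """Free-molecular drag coefficient, Eq. (6)."""
    s = V / np.sqrt(2.0 * T_g * K_B / m_g)
    return ((2*s**2 + 1) / (s**3 * np.sqrt(np.pi)) * np.exp(-s**2)
            + (4*s**4 + 4*s**2 - 1) / (2*s**4) * erf(s)
            + 2*np.sqrt(np.pi) / (3*s) * np.sqrt(T_d / T_g))

def acceleration(r, v_d, sun_dir, r_d, rho_d, r_h_au, v_g, rho_g, T_g, m_g, Q_RP=1.0):
    """Sum of Eqs. (3)-(5) divided by the agglomerate mass.
    `sun_dir` is the unit vector pointing from the agglomerate towards the Sun."""
    m = 4.0 / 3.0 * np.pi * rho_d * r_d**3
    a_grav = -G * M_N * r / np.linalg.norm(r)**3                                      # Eq. (3)
    a_rad = -(C_SUN * Q_RP * np.pi * r_d**2) / (r_h_au**2 * C_LIGHT * m) * sun_dir    # Eq. (4)
    V = v_g - v_d
    Vn = np.linalg.norm(V)
    C_D = drag_coefficient(Vn, T_g, T_g, m_g)                                         # T_d = T_g assumed
    a_drag = 0.5 * Vn * rho_g * np.pi * r_d**2 * C_D * V / m                          # Eq. (5)
    return a_grav + a_rad + a_drag

def sample_initial_speeds(v_P, n, rng=None):
    """Eq. (7): SciPy's Maxwell distribution has its mode at sqrt(2)*scale, so scale = v_P/sqrt(2)."""
    return maxwell.rvs(scale=v_P / np.sqrt(2.0), size=n, random_state=rng)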
The dust agglomerates are then characterized by four parameters: their density 𝜌_d, radius 𝑟_d, most probable initial speed 𝑣_P and most probable initial direction 𝜃_d from the surface normal. The dynamical simulations were carried out individually for each combination of those parameters; the values used for each parameter are listed in Table 2. A total of 1176 parameter combinations was used for the dynamical integrations, except in the case of STP063, where 1470 combinations were used. Even though some of these combinations do not represent a physically meaningful particle, they are nonetheless simulated in order to better understand the impact of the parameter choice on the results.

Table 2. Values used in the dynamical simulations for each dust parameter. The radii marked with a * were only used for the set STP063.

Parameter | Values
𝜌_d       | [1; 10; 50; 100; 200; 500; 800] kg m⁻³
𝑟_d       | [0.01; 0.05; 0.1; 0.5; 1; 5; 10; 50; 80*; 100*] cm
𝑣_P       | [0; 0.5; 1.0; 2.0; 5.0; 10.0] m s⁻¹
𝜃_d       | [0; 20; 40; 60] °
3.2 Gas model

The gas simulations are done in two parts. First, the gas production rate is calculated based on the model presented by Fulle et al. (2020). This model assumes the nucleus surface to be composed of cm-sized pebbles with water ice sublimating inside them. When the surface temperature exceeds 205 K, the pressure inside a pebble is high enough to overcome its tensile strength, making dust ejection possible. Using the heliocentric distances obtained from the image headers, the production rate as a function of the subsolar angle is calculated (Fig. 3). In the second part, this production rate is used as a boundary condition for the hydrodynamic simulations of the gas distribution in the coma. As in previous works (Zakharov et al. 2018, 2021), the initial speed of the gas at the nucleus surface is set to the local sound speed. The gas flow is modelled through the Euler equations, which implies that the gas is considered ideal, at equilibrium, and without viscous dissipation or heat conduction. The hydrodynamic simulations are carried out in two dimensions using the code PLUTO (Mignone et al. 2007) until a static solution is achieved (Fig. 3).
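As a rough illustration of the first step only, the toy functions below compute a grey-body surface temperature as a function of insolation angle and flag the surface elements above the 205 K activation threshold. This is a deliberately simplified stand-in, not the Fulle et al. (2020) model; the albedo and emissivity values are assumptions.

import numpy as np

SIGMA_SB = 5.670e-8   # Stefan-Boltzmann constant, W m^-2 K^-4

def surface_temperature(zenith_deg, r_h_au, albedo=0.06, emissivity=0.9):
    """Grey-body equilibrium temperature for a given solar zenith angle and heliocentric distance."""
    flux = 1361.0 / r_h_au**2 * (1.0 - albedo) * np.clip(np.cos(np.deg2rad(zenith_deg)), 0.0, None)
    return (flux / (emissivity * SIGMA_SB))**0.25

def dust_ejection_allowed(zenith_deg, r_h_au, T_crit=205.0):
    """Surface elements warmer than 205 K are allowed to eject dust (cf. Sec. 3.2)."""
    return surface_temperature(zenith_deg, r_h_au) > T_crit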
3.3 Generation of synthetic images

Using the results of the gas simulations discussed in Section 3.2, the system described by equations (3)–(5) can be numerically solved. The two-dimensional dust agglomerate trajectories obtained from the dynamical modelling are then transformed into three dimensions using the symmetry assumptions mentioned in Section 3.1. These three-dimensional trajectories are checked for possible intersections with the camera FOV. If such an intersection occurs, two intersection points, entry and exit, are defined. A random position r₁ inside the FOV is selected by linear interpolation between the intersection points; this is used as one of the endpoints of the synthetic track. The remaining endpoint is defined as r₂ = r₁ ± v₁ 𝑡_exp, where v₁ is the interpolated velocity at r₁, 𝑡_exp is the image exposure time and the sign is chosen randomly. While r₁ is enclosed within the camera FOV, that is not necessarily the case for r₂. Both endpoints are then projected onto the detector plane, giving the projected track.

The last step consists of checking whether the track would be bright enough to be detected. For this, tracks for which the mean distance between r₁,₂ and the camera is larger than a limit distance Δ are discarded. This limit distance is calculated as (Agarwal et al. 2016)

Δ = √( 𝑟_d² 𝑝 Φ(𝛼) 𝐼_⊙ / (𝐽 𝑟_h²) ),    (8)

where 𝑝 and Φ(𝛼) are the geometric albedo and phase function of the agglomerate respectively, 𝐼_⊙ is the solar flux in the corresponding filter in W m⁻² nm⁻¹, 𝑟_h is the heliocentric distance in au and 𝐽 is the image background brightness, estimated as its median, in W m⁻² nm⁻¹.
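A small sketch of the synthetic-track bookkeeping described here: Eq. (8) for the limiting distance and the random choice of the second endpoint. Variable names are mine; the albedo, phase function value and fluxes are inputs, and the example call uses placeholder numbers.

import numpy as np

def limit_distance(r_d, p, phi_alpha, I_sun, J, r_h_au):
    """Eq. (8): maximum camera distance at which a grain of radius r_d would still stand
    out against the image background J (Agarwal et al. 2016)."""
    return np.sqrt(r_d**2 * p * phi_alpha * I_sun / (J * r_h_au**2))

def synthetic_track_endpoints(r1, v1, t_exp, rng):
    """Second endpoint r2 = r1 +/- v1 * t_exp with a randomly chosen sign."""
    sign = rng.choice([-1.0, 1.0])
    return r1, r1 + sign * np.asarray(v1) * t_exp

# Example with a NumPy random generator (values are placeholders):
rng = np.random.default_rng(0)
r1, r2 = synthetic_track_endpoints(np.zeros(3), np.array([0.5, 0.0, 0.0]), t_exp=73.0, rng=rng)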
Figure 3. Gas simulations for the set STP092. Top: production rate per unit area and surface temperature as a function of the insolation angle. The dashed line indicates the 205 K limit above which dust ejection is possible. Bottom: static solution for the gas flow. From top left to bottom right, the panels show density, pressure, radial and tangential velocity in arbitrary units. The nucleus is at the origin, the illumination comes from the positive 𝑥 axis and the distances are expressed in units of 𝑅_N.

4 ANALYSIS OF THE DETECTED TRACKS

4.1 Orientation

First, the distribution of the orientation angle of the tracks is analyzed by building histograms with 20 bins spanning the [−90°, +89°] interval, normalized such that the sum of the bar heights equals one. A modified von Mises distribution is then used to fit each histogram. The von Mises distribution is an approximation of the normal distribution for a periodic domain,

𝑓(𝑥) = exp( cos(𝑥 − 𝜇) / 𝜎² ) / ( 2π 𝐼₀(1/𝜎²) ),    (9)

where 𝜇 and 𝜎 represent the mean and standard deviation respectively, and 𝐼₀ is the modified Bessel function of order zero. This function is defined over the [0, 2π] domain, so it is remapped to match the domain of the orientation angles. An example of the normalized histogram of the orientation angles and the corresponding von Mises fit can be seen in Fig. 4.

Figure 4. Normalized histogram of the orientation angle of the tracks detected in the image taken in STP092 at a phase angle of 50° with the Red filter. The bottom panel shows the von Mises fit obtained for the data.
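The fit can be reproduced in a few lines; the remapping of the [−90°, +90°) orientation domain onto the full circle (via a factor of two) and the bin-width factor are my assumptions about how the "modified" von Mises of Eq. (9) is applied to the normalized histogram.

import numpy as np
from scipy.optimize import curve_fit
from scipy.special import i0

def von_mises_height(theta_deg, mu_deg, sigma, bins=20):
    """Eq. (9) evaluated on orientation angles remapped to the full circle (x = 2*theta),
    multiplied by the bin width so it can be compared with normalized bar heights."""
    x, mu = np.deg2rad(2.0 * theta_deg), np.deg2rad(2.0 * mu_deg)
    pdf = np.exp(np.cos(x - mu) / sigma**2) / (2.0 * np.pi * i0(1.0 / sigma**2))
    return pdf * (2.0 * np.pi / bins)

def fit_orientation_histogram(angles_deg, bins=20):
    counts, edges = np.histogram(angles_deg, bins=bins, range=(-90.0, 90.0))
    heights = counts / counts.sum()
    centres = 0.5 * (edges[:-1] + edges[1:])
    (mu, sigma), _ = curve_fit(von_mises_height, centres, heights, p0=[0.0, 1.0])
    return mu, sigma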
The mean orientation angle of the tracks in the images as a function of the phase angle is shown in Fig. 5. We find that there is a difference between the mean direction of the tracks and the radial direction, where the radial direction is defined as the direction of the spacecraft–nucleus vector projected onto the image plane. From this figure we notice that this discrepancy shows two particular features. First, the deviation is not constant within each set, but depends on the phase angle. Second, in almost all cases, the deviation from the radial direction depends on the exposure time of the images: the shorter the exposure time, the closer the mean direction of the tracks is to the radial one. In Section 5.1 we discuss the origins of these features.

4.2 Phase function

To compute the phase function of the tracks, photometry was performed on all tracks completely enclosed in the image. The method is similar to the one described in Güttler et al. (2017): a morphological dilation of the original track is performed with two ring sizes, in order to obtain two stadium shapes enclosing the track. The sizes of the discs used for the dilation are estimated from the local gradient image 𝐺 (Fig. 6).
The gradient image 𝐺 is calculated as 𝐺 = √(𝐺_x² + 𝐺_y²), where 𝐺_x and 𝐺_y are the directional gradients obtained using a Gaussian kernel. The pixels contained in the inner shape are summed to obtain the total track brightness, while the background is estimated as the median value of the pixels between the two shapes and subtracted from the brightness of the central shape.
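A compact sketch of this aperture photometry, assuming a boolean track mask and using scikit-image for the dilations. The inner radius would come from the gradient-based width estimate, and the fixed 20-pixel total width of the outer aperture is translated into a 10-pixel dilation radius here; both are assumptions about the implementation.

import numpy as np
from skimage.morphology import binary_dilation, disk

def track_photometry(image, track_mask, inner_radius, outer_radius=10):
    """Sum the flux inside the inner stadium around the track and subtract the median
    background measured between the inner and outer stadium shapes."""
    inner = binary_dilation(track_mask, disk(inner_radius))
    outer = binary_dilation(track_mask, disk(outer_radius))
    ring = outer & ~inner
    background = np.median(image[ring])
    return image[inner].sum() - background * inner.sum()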
The brightness of all the detected tracks entirely contained in the images is represented by the small, coloured dots in Fig. 7. This quantity does not depend on the apparent speed of the agglomerates (except when the apparent speed is small and the track is shorter than 10 pixels, see Sec. 2.1), but only on their distance to the camera, their size and their scattering properties. The image acquisition method consists of keeping the spacecraft in the same position and rotating it to obtain images at different phase angles. We assume that all observed agglomerates are in the vicinity of the spacecraft and that the population of dust agglomerates generating the tracks in a given set is similar at all phase angles. From this, we can estimate the phase function of the agglomerates through a statistical analysis of the brightness, assuming that the most representative value at a given phase angle is the median over all tracks in that image.

Figure 5. Mean direction of the tracks in the images. The symbol represents the mean and the errorbar the standard deviation of the von Mises distribution fitted to the orientation angle histogram.

Figure 6. Example of the photometry performed on the tracks. The two stadium shapes in the left panel are obtained by dilating the detected track with a disc. The radius of the disc is obtained from the gradient of the image taken along segments perpendicular to the track (right). The blue line in the left panel represents one of the perpendicular segments along which the image gradient is obtained. The black dotted lines show the gradient profile along all the segments, while the median of all profiles is shown with the red solid line. Using these profiles, the total width of the track (blue solid lines) can be found, and it is used as the width of the inner aperture. The outer aperture has a fixed total width of 20 pixels.

However, the set of track brightnesses is biased in two ways. First, as mentioned before, images taken at high phase angles are contaminated
by straylight, which means that the background brightness is much higher than in the images taken at low phase angle. For this reason, faint tracks cannot be detected in high phase angle images, as they blend into the background, so the sample is biased towards brighter tracks, as can be seen in Fig. 7. For the rest of this phase function analysis, tracks obtained from images taken at phase angles greater than 120° are therefore discarded. Secondly, the scattering phase function values are higher at low phase angles, so fainter agglomerates can be detected there. This effect introduces a bias into the phase function derived from the detections. To overcome this issue, we adopt an iterative process. Following the results of Fornasier et al. (2015) and Güttler et al. (2017), we fit the median values of the track brightness using an exponential function of the form 𝑅(𝛼) = 𝐴 exp(−𝛽 𝛼). Using this result as a preliminary phase function, we look for the faintest track in the images taken at the highest phase angle and extrapolate its brightness to the remaining images.
This extrapolated value is used as a lower threshold for the brightness of the tracks considered in the second iteration step, discarding all fainter tracks. The fit is then repeated and the coefficients are recalculated for the debiased sample.

The chosen expression for the phase function provides a good fit to the data once the values at phase angles greater than 120° are removed from the sample: the mean value of the coefficient of determination 𝑅² over all samples is 0.83. This confirms the results of Fulle et al. (2018), who show that the characteristic U-shape found for the dusty-coma phase function (Bertini et al. 2017) is valid for particles with radius 𝑟 < 1.25 mm, smaller than the ones observed in the OSIRIS data used in Section 2. However, we find that the mean value over all the sets is 𝛽 = 8.2 × 10⁻³, around five times smaller than that found by previous works focusing on the comet nucleus. Following the classical theory by Lumme & Bowell (1981a,b), this can be explained by shadowing due to different surface roughness. Even when the nucleus phase function is corrected for self-shadowing, the total brightness depends on the pixel resolution, since non-resolved shadows cannot be corrected and affect the final result (see fig. 1 in Hasselmann et al. 2021). Because of this, a lower 𝛽 value for the coma agglomerates is consistent with the smaller size of the dust agglomerates in the coma compared to that of the nucleus. Analyzing the time evolution of 𝛽, we find that the mean value over the three filters is 9.5 × 10⁻³, 8.8 × 10⁻³ and 6.4 × 10⁻³ for the sets STP063, STP086 and STP092 respectively. Using the same argument as before, this can be explained as a reduction of the median size of the agglomerates in the coma, which is consistent with the increasing heliocentric distance of the comet.
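The two-pass fit described above can be sketched as follows. The way the extrapolated brightness floor is applied at each phase angle is my reading of the text, and the 120° cut and starting guesses are taken from the surrounding discussion.

import numpy as np
from scipy.optimize import curve_fit

def phase_curve(alpha, A, beta):
    """R(alpha) = A * exp(-beta * alpha)."""
    return A * np.exp(-beta * alpha)

def debiased_phase_fit(alpha_deg, brightness, alpha_cut=120.0):
    alpha = np.asarray(alpha_deg, float)
    bright = np.asarray(brightness, float)
    keep = alpha <= alpha_cut                       # drop straylight-contaminated images
    alpha, bright = alpha[keep], bright[keep]
    phases = np.unique(alpha)

    def fit_medians(mask):
        ph = [a for a in phases if np.any(mask & (alpha == a))]
        med = [np.median(bright[mask & (alpha == a)]) for a in ph]
        return curve_fit(phase_curve, np.array(ph), np.array(med), p0=[max(med), 1e-2])[0]

    A1, b1 = fit_medians(np.ones_like(alpha, bool))  # preliminary fit to the per-phase medians
    a_max = phases.max()
    floor = bright[alpha == a_max].min() * np.exp(-b1 * (alpha - a_max))
    A2, b2 = fit_medians(bright >= floor)            # refit on the debiased sample
    return A2, b2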
Figure 7. Phase function of the observed agglomerates. From top to bottom: results for STP063, STP086 and STP092. The light-coloured dots indicate the integrated reflectance of each track, while the coloured triangles show the median at each phase angle. The coloured circles represent the median of the integrated reflectance after filtering out dim tracks.

Figure 8. Mean direction of the tracks in the F24 images for STP092, compared against the mean directions of the simulated trajectories with radiation pressure factors of 𝐶 = 0, 10 and 100. Results for 𝐶 = 1 were almost identical to those for 𝐶 = 0, so they are not included in this plot.
5 COMPARISON WITH THE MODEL AND DISCUSSION

5.1 Orientation angle distribution

In order to test whether the measured directions introduced in Section 4.1 can be explained by a projection effect, we create synthetic images corresponding to the STP092 observation geometry, but without taking the gas drag into account. Since the results depend on density and size mainly through the gas drag, the choice of these parameters does not strongly affect the results. For this test case, we use agglomerates with 𝜌 = 100 kg m⁻³ and 𝑟_d = 1 cm, ejected from the nucleus with a most probable speed of 1 m s⁻¹. In order to check the relevance of the radiation pressure for this effect, we compute the trajectories of agglomerates on initially radial trajectories under the effect of the gravitational and radiation pressure forces, but including a multiplicative factor 𝐶 for the latter. Fig. 8 displays the same plot as in Fig. 5 for the aforementioned set, but with the mean directions for various 𝐶 values superimposed.

In the purely gravitational case (𝐶 = 0), the agglomerates move on radial trajectories, but even so, the deviation is observed. This can be explained by a simple projection effect: here, the radial direction is defined as the projection onto the image of a vector joining the nucleus and the spacecraft. Since the agglomerates are not at the same position as the spacecraft, the projection of their own (local) radial directions is not necessarily parallel to the one at the spacecraft.
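A hedged sketch of such a test integration, with the radiation pressure term scaled by a factor C and the gas drag switched off; the initial state, Sun direction and integration span are placeholders, and the constants follow the definitions of Sec. 3.1.

import numpy as np
from scipy.integrate import solve_ivp

G, M_N = 6.674e-11, 9.982e12           # SI units; nucleus mass as in Sec. 3.1
C_SUN, C_LIGHT = 1361.0, 2.998e8

def rhs(t, y, C, r_d, rho_d, r_h_au, sun_dir):
    """Gravity plus radiation pressure scaled by the test factor C (no gas drag)."""
    r, v = y[:3], y[3:]
    m = 4.0 / 3.0 * np.pi * rho_d * r_d**3
    a = -G * M_N * r / np.linalg.norm(r)**3
    a = a - C * (C_SUN * np.pi * r_d**2) / (r_h_au**2 * C_LIGHT * m) * sun_dir
    return np.concatenate([v, a])

# Hypothetical test particle: 1 cm grain of 100 kg m^-3 launched radially at 1 m/s.
r0 = np.array([2000.0, 0.0, 0.0])
v0 = 1.0 * r0 / np.linalg.norm(r0)
sun_dir = np.array([1.0, 0.0, 0.0])     # unit vector towards the Sun (placeholder geometry)
sol = solve_ivp(rhs, (0.0, 5.0e4), np.concatenate([r0, v0]),
                args=(100.0, 0.01, 100.0, 2.18, sun_dir), max_step=100.0)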
Although the projection effect can explain the trend of the deviation from the radial direction as a function of the phase angle in the purely
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='5 nm) F22 - Red (743.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='7 nm 10 All lines All lines All lines B1 = 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='99e - 03 B1 = 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='87e - 03 Bi = 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='11e - 03 Debiased Debiased Debiased B2 = 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='99e - 03 β2 = 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='14e - 03 B2 = 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content='06e - 03 10- 10-6 10-8 10-9 20 60 100 140 20 60 100 140 20 60 100 140 Phase Angle180 135 Mean direction (deg) 90 Observed 45 C=0 C = 10 C = 100 Radial Solar 20 40 60 80 100 120 140 Phase angle (deg)8 P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Lemos gravitational (𝐶 = 0) case, this effect alone is not sufficient to explain the absolute value of the deviation at phase angles larger than 120◦.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Additionally, the observed angular dispersion is larger than the one found with this model for all phase angles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' However, a value of 𝐶 ≫ 1 can account for the mean direction as well as the angular dispersion in the high phase angle region.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' From a physical perspective, several processes can be invoked in order to explain an enhancement of the radiation pressure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' For example, an agglomerate mass versus cross section ratio smaller than the one used for the integration, either caused by a lower density or a nonspherical shape, can explain the higher radiation pressure effect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' Also, additional forces parallel to the solar direction, such as outgassing from slowly rotating agglomerates (Kelley et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' 2013), can account for the effect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdE4T4oBgHgl3EQfGgyE/content/2301.04895v1.pdf'} +page_content=' However, it is worth noticing the limitations given by the choice of boundary conditions.' 
In order to reduce the time required for the simulations, the integration domain limit is set to an altitude 20 km higher than that of the spacecraft. If some agglomerates ejected from the nucleus decelerate and fall back at altitudes above the domain limit, their trajectories are not taken into account by our model. The inclusion of these agglomerates may modify the results, even for typical values of the radiation pressure force. This finding represents a nuance with respect to previous results. Della Corte et al. (2016) and Longobardo et al. (2019, 2020) report radial trajectories for particles analyzed by GIADA, but the smaller size of these particles compared with those that OSIRIS is able to observe (see Sec. 5.2) may explain this feature, since they are more affected by the gaseous drag. In addition, Longobardo et al. (2020) propose that the motion of the particles can only be considered radial up to altitudes of ∼40 km, since radiation pressure plays an important role at higher altitudes. Likewise, Gerig et al. (2018) find that dust agglomerates observed by OSIRIS follow a free-radial outflow from the nucleus at distances larger than 12 km from it, but their analysis is limited to altitudes of up to 40 km. On the other hand, Frattin et al. (2021) analyze similar OSIRIS images, with tracks generated by the motion of dust agglomerates.
As in our case, they find that most of the agglomerates have trajectories close to the radial direction, and interpret the remaining ones as a population of objects on bound orbits around the nucleus. In summary, the explanation for the track directions presented in this work proposes that the agglomerate trajectories have a clear general orientation. Like the interpretation proposed in previous works, we find that this general orientation is close to the radial direction once the projection effect is taken into account. However, for phase angles greater than 120°, neither the most probable orientation angle nor its dispersion can be well reproduced by radial trajectories alone. To explain these trajectories, forces other than the gravity of the nucleus (e.g. radiation pressure) or, following the explanation by Frattin et al. (2021), an increased proportion of agglomerates on bound orbits, must be considered.

5.2 Dust parameter optimization

As explained in Section 3.3, the generation of synthetic images is based on the trajectories computed for individual combinations of dust parameters. However, no single parameter combination will fully represent the observations, since the analyzed OSIRIS images contain tracks generated by a variety of different agglomerates. To overcome this issue, we assume that the distribution of track properties obtained from the real images can be expressed as a linear combination of those in the synthetic ones.
This implies that interactions between different types of agglomerates, either directly through collisions or indirectly mediated by the gas in the coma, are considered irrelevant for their dynamical evolution. We focus on comparing the distribution of track properties in the length–orientation angle space. In order to compare these distributions with those obtained from the real images, we generate normalized histograms with equal bin ranges for the tracks detected in both types of images. Mathematically, this is equivalent to creating a 2D matrix in which each element represents the number of tracks with a specific combination of length and orientation angle, normalized by the total number of tracks in the image. Then, for each observation geometry there exist two types of histograms: the one obtained from the real image, x, and those obtained from the synthetic ones, y_i, where i labels the different dust properties used for the simulations. We generate the master synthetic histogram Z from the linear combination Z = Σ_i K_i y_i. The coefficients K_i, which roughly represent the preponderance of agglomerates with certain properties in the image, are chosen such that they minimize the χ² distance between the real histogram x and the master synthetic histogram Z. The χ² distance between two histograms with N bins is defined as

χ² = (1/2) Σ_{j=1}^{N} (x_j − Z_j)² / (x_j + Z_j),   (10)

where x_j and Z_j represent the value of bin j in the real and master synthetic histograms, respectively. The K_i coefficients giving the best fit are found using an iterative linear least-squares solver with random initial values, subject to the constraints K_i > 0 and Σ_i K_i = 1. For simplicity, we computed the χ² distance for all the individual histograms y_i and calculated the master synthetic histogram Z using only the subset of the 100 synthetic images with the best results.
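As an illustration of this fitting step, the short Python sketch below minimizes the χ² distance of equation (10) over non-negative weights that sum to one. It assumes the real and synthetic histograms are already available as flattened, normalized NumPy arrays; the function names and the choice of SciPy's SLSQP solver are ours, not the implementation used in the paper.

import numpy as np
from scipy.optimize import minimize

def chi2_distance(x, Z, eps=1e-12):
    # Equation (10): half the sum of squared bin differences over bin sums.
    den = x + Z
    mask = den > eps                      # skip empty bins to avoid division by zero
    return 0.5 * np.sum((x[mask] - Z[mask]) ** 2 / den[mask])

def fit_weights(x, ys, seed=0):
    """Find K_i >= 0 with sum(K_i) = 1 minimizing chi2(x, sum_i K_i y_i).

    x  : flattened, normalized 2D histogram from the real image
    ys : array of shape (n_images, n_bins) with the synthetic histograms
    """
    rng = np.random.default_rng(seed)
    n = ys.shape[0]
    k0 = rng.random(n)
    k0 /= k0.sum()                        # random initial weights on the simplex

    objective = lambda k: chi2_distance(x, k @ ys)
    constraints = ({'type': 'eq', 'fun': lambda k: k.sum() - 1.0},)
    bounds = [(0.0, 1.0)] * n

    res = minimize(objective, k0, method='SLSQP',
                   bounds=bounds, constraints=constraints)
    return res.x, res.fun

In practice, the input histograms could be built with np.histogram2d over track length and orientation angle, using identical bin edges for the real and synthetic images and normalizing each histogram by its total number of tracks, as described above.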
In order to check whether the choice of the image subset affects the results, we repeated the fit with different numbers of synthetic images. We found that performing the fit with these 100 synthetic images provided the best compromise between the accuracy of the results, as measured by the residual after the fit, and the execution time. An example of the best-fitting histograms obtained with this method can be seen in Fig. 9. We use these K_i values to obtain a weighted distribution of the parameters used in the simulations. Fig. 10 shows an example of this weighted distribution for an image from the set STP092 taken at a phase angle α = 120° with the Blue filter. This distribution is obtained by grouping the synthetic images by their values of a given parameter (in the case of Fig. 10: density, agglomerate radius, initial speed and initial direction), and summing the K_i values within each group. We found that varying the number of synthetic images used for the fit does not significantly change these results. The weighted means can be computed from these distributions. Fig. 11 shows the dependence of these means on the phase angle at which the images were taken. We plot four different parameters: agglomerate density, radius, most probable initial speed and mass over area ratio. The mass over area ratio M/A = (4/3 π ρ r_d³)/(π r_d²) = (4/3) ρ r_d is useful for quantifying the effect of the radiation pressure and the gaseous drag on the agglomerate dynamics, since both the F_G/F_R and F_G/F_D ratios depend linearly on it.
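The following short sketch illustrates how such weighted distributions and weighted means can be computed from the fitted coefficients; the parameter grid and the weights are made-up numbers used purely for illustration.

import numpy as np
from collections import defaultdict

def weighted_distribution(values, weights):
    """Group synthetic images by a parameter value and sum their fitted weights K_i."""
    dist = defaultdict(float)
    for v, k in zip(values, weights):
        dist[v] += k
    return dict(dist)

def weighted_mean(values, weights):
    return float(np.average(values, weights=weights))

# Hypothetical grid values: densities (kg m^-3) and radii (m) of the synthetic
# images, with the fitted weights K_i obtained for one real image.
rho = np.array([100.0, 100.0, 500.0, 800.0])
r_d = np.array([0.05, 0.20, 0.20, 0.50])
K   = np.array([0.10, 0.40, 0.35, 0.15])

print(weighted_distribution(rho, K))                 # summed weight per density value
print(weighted_mean(rho, K), weighted_mean(r_d, K))  # weighted mean density and radius
print(weighted_mean(4.0 / 3.0 * rho * r_d, K))       # weighted mean M/A = (4/3) rho r_d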
This model is not able to provide tight constraints on the density, and hence neither on the M/A ratio, but a clear trend can be seen for the remaining parameters. Lastly, as can be seen in the bottom panel of Figure 10, the results are independent of the choice of the angle between the initial velocity and the local normal, θ_d, so they are not shown here. This is because, while the initial velocity of the dust agglomerates is not radial, the initial velocity of the gas is, so the gaseous drag force, which is very strong due to the high gas density in the vicinity of the nucleus, cancels out the possible effect of this non-radial initial velocity. As can be seen in Figure 11, the results for the images obtained using the Blue and Red filters match each other, while the ones for the Orange filter show a larger deviation. As mentioned in Section 2.1, the exposure times used for this type of image introduce a bias towards agglomerates that are faster or closer to the camera, and may be responsible for the observed discrepancies. It can be noted that the mean chunk sizes follow the expected trend: they are larger for the sets taken closer to perihelion, and they increase with phase angle, that is, when looking into the dayside of the coma.

Figure 9. Observed and fitted distribution of the tracks in the orientation angle versus track length plane found for the set taken at STP092 with a phase angle of α = 30°. The rows represent the filter used for the image acquisition (from top to bottom: Blue, Orange and Red), while the left and right columns show the observed and fitted distributions, respectively. The color code represents the number of tracks found in each bin, normalized by the total number of tracks in the image.
In order to increase the sample size, the combined results for the chunk size as a function of phase angle obtained from the fits for the Blue and Red filters are shown in Figure 12. However, the sizes are much larger than the theoretical maximum liftable size found using the model by Fulle et al. (2020). In contrast, Gundlach et al. (2020) and Ciarniello et al. (2022) show that CO2 ice sublimation is the main driver of the activity of chunks with sizes ≳ 10 cm. This is because the water sublimation front is located at shallower depths below the surface, so it can only build up enough pressure to overcome the internal strength of the material and eject chunks at these shallow depths. On the other hand, the CO2 sublimation front is located deeper, allowing larger chunks to be ejected. It is important to notice that the studied case assumes that the agglomerates are ejected when the gas pressure overcomes the material tensile strength. Therefore, this model is not able to analyze the case where a detached agglomerate resting on top of the surface gains an initial impulse and is lifted. The scenario in which the chunks are ejected via CO2 sublimation is consistent with the nonzero mean initial speed of the agglomerates found in our simulations. Since CO2 sublimation is not included in our model, it can be represented as an initial kick to the chunks, making it possible for them to be lifted.

Figure 10. Weighted distribution of the parameters found for the image taken at α = 120° with the Blue filter in the set STP092.
Figure 11. Weighted mean for the dust parameters. The four selected parameters (density, radius, initial speed and M/A ratio) are displayed in columns from left to right, while the results for STP063, STP086 and STP092 are shown in the top, middle and bottom rows, respectively. Line colors indicate the filter used, as in Fig. 5.

Figure 12. Weighted mean chunk size obtained from the combination of the Blue and Red filters as a function of the phase angle. It can be seen that the chunk size increases with decreasing heliocentric distance.

Once in the coma, the agglomerates evolve dynamically under the influence of the aforementioned forces, in particular gas drag. However, since the CO2 production rate is around one order of magnitude lower than that of H2O, the latter controls the drag force, and the assumption of a coma composed of water vapour remains valid. Regarding the initial speeds, we observe that the initial velocities derived from the histogram fitting are larger for the set from STP063, that is, the one closest to perihelion. Moreover, in the purely gravitational case (i.e. without radiation pressure or gas drag), the initial speed required to reach the altitudes at which the spacecraft was located in all three analyzed sets is ≃ 0.80 m s⁻¹. Also, for all three data sets, the initial velocities found via the histogram fitting (Fig. 11) are sufficient for the chunks to reach the spacecraft altitude. Hence, the mere presence of these agglomerates in the FOV does not imply that they have been significantly accelerated by gas drag after leaving the nucleus surface. Due to their large size, it is unclear whether gas drag has any relevance for the dynamical evolution of these chunks. We use an indirect approach to estimate the effect of gas drag on the observed chunks, based on the fraction of them having bound orbits and on the distribution of initial velocities of the agglomerates inside the FOV. If we assume that gas drag does not influence the dynamics of the chunks, the initial speed needed to reach the FOV (0.80 m s⁻¹) is close to the escape speed of the spherical nucleus (0.82 m s⁻¹). Since the initial velocities are drawn from the Maxwell–Boltzmann distribution given in equation 7, we calculate the probability that an agglomerate has an initial speed sufficient to reach the FOV but still smaller than the escape speed. We find that this condition is fulfilled by only a small proportion (<1 per cent) of agglomerates in this purely gravitational case. However, if we analyze the energy of the agglomerates that intersect the FOV in our simulations (which include the effect of gas drag), we find a much higher proportion of bound orbits. This number is highly variable between data sets, ranging from 0 to 30 per cent, with a mean of 12 per cent. We interpret this finding as indicating that the majority of the bound chunks were initially lifted with speeds too small to reach the FOV, but were subsequently accelerated towards the FOV by gas drag. This interpretation is supported by the fact that 32 per cent of the chunks that reach the FOV have initial speeds lower than needed to reach the spacecraft altitude. All this implies that the dynamics of the large chunks found in our simulations is still affected by gas drag.
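A small sketch of this probability estimate is given below, assuming that equation 7 is the standard Maxwell–Boltzmann speed distribution written in terms of the most probable speed v_p; the two threshold speeds are the ones quoted above, while v_p = 1 m s⁻¹ is an illustrative value, and the resulting fraction depends on it.

import numpy as np
from scipy.integrate import quad

def maxwell_boltzmann_pdf(v, v_p):
    # Maxwell-Boltzmann speed distribution expressed with the most probable speed v_p
    # (assumed form of equation 7).
    return 4.0 / np.sqrt(np.pi) * v**2 / v_p**3 * np.exp(-(v / v_p) ** 2)

def prob_between(v_low, v_high, v_p):
    """Probability that the initial speed lies between v_low and v_high."""
    p, _ = quad(maxwell_boltzmann_pdf, v_low, v_high, args=(v_p,))
    return p

# Speed needed to reach the FOV and escape speed of the spherical nucleus (from the text),
# with an illustrative most probable speed of 1 m/s.
v_fov, v_esc, v_p = 0.80, 0.82, 1.0
print(prob_between(v_fov, v_esc, v_p))   # fraction launched fast enough to reach the FOV yet still bound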
Figure 13. Median track length found in the synthetic images for different values of the particle radius. Each black dot represents the median length of all the tracks found in one synthetic image. Red symbols show the median value of all images grouped by the particle size used for the simulation. A clear trend can be seen where larger agglomerates generate shorter tracks.

It is important to note that the chunk sizes found in this work are larger than those found by Frattin et al. (2021) for the same type of images. Agglomerate sizes compatible with Frattin et al. (2021) would have velocities too high in our model to be compatible with the observed track lengths. Fig. 13 shows the median track lengths found in the synthetic images as a function of the agglomerate size, where it can be seen that smaller particles produce longer tracks. The reason for this behaviour is that, after acceleration by gas drag, the ratio F_G/F_D ∝ r_d for a fixed particle density. This implies that smaller particles are more susceptible to the action of the gas drag, and can acquire higher velocities, generating longer tracks. For agglomerate sizes compatible with Frattin et al. (2021), the tracks in the synthetic images are longer than those in our OSIRIS images, so our data can only be reproduced by larger chunks. This effect is particularly noticeable in the case of STP063, where the gas flow was so intense that additional simulations with larger chunks had to be carried out (see Table 2). However, there may exist other effects that slow down the agglomerates. For example, solar gravity, which was not taken into account in the dynamical simulations, could provide an alternative way of producing shorter tracks. Since the simulations were carried out in the non-inertial nucleocentric reference frame, the net force acting on the agglomerates in this frame is the difference between the solar gravity force on the agglomerate and that on the nucleus, i.e. the tidal force.
This force increases linearly with the nucleocentric distance and, due to the observation geometry (the nucleus–spacecraft vector forming ≃90° with the radial direction), its direction at the position of the spacecraft is radial, pointing towards the nucleus, effectively increasing the value of the nucleus gravitational acceleration and slowing down the observed dust agglomerates even further. Assuming that all the agglomerates present in the images are at the same height above the nucleus as the spacecraft, and using the approximate expression for the tidal acceleration a_T = G M_⊙ r / r_h³, the ratio between the tidal and nucleus gravity forces, F_T/F_G, is equal to 0.19, 0.02 and 5.7×10⁻³ for the sets STP063, STP086 and STP092, respectively. Tidal effects may therefore play a relevant role in the dynamical evolution of the dust agglomerates, mainly for the first set.
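This ratio can be evaluated with a few lines of code; the nucleus mass and the altitude/heliocentric-distance pair used below are illustrative assumptions, not the actual geometry of the three data sets.

import numpy as np

G     = 6.674e-11   # gravitational constant [m^3 kg^-1 s^-2]
M_SUN = 1.989e30    # solar mass [kg]
M_67P = 1.0e13      # approximate nucleus mass of 67P [kg] (assumption)
AU    = 1.496e11    # astronomical unit [m]

def tidal_to_gravity_ratio(r, r_h):
    """F_T/F_G at nucleocentric distance r [m] and heliocentric distance r_h [m],
    using a_T = G M_sun r / r_h**3 and a_G = G M_67P / r**2."""
    a_tidal   = G * M_SUN * r / r_h**3
    a_gravity = G * M_67P / r**2
    return a_tidal / a_gravity

# Hypothetical geometry: a 200 km spacecraft altitude at 1.25 au gives a ratio
# of roughly 0.2, of the same order as the value quoted for STP063.
print(tidal_to_gravity_ratio(200e3, 1.25 * AU))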
6 CONCLUSIONS

We developed a semi-automatic method to detect tracks generated by agglomerates moving in front of the OSIRIS camera onboard Rosetta. The method exploits the fact that agglomerates moving in front of the camera generate a particular track pattern. We applied this method to three different image sets taken with the NAC camera, comprising a total of 105 images, and detected 20033 tracks. We analyzed the photometric data obtained from those tracks, and found that the agglomerates' phase functions do not show the characteristic U-shape found for the phase function of the coma (Bertini et al. 2017), but rather follow the same exponential trend as the one shown by the nucleus (Fornasier et al. 2015; Güttler et al. 2017). Following Fulle et al. (2018), this establishes a lower limit for the agglomerate sizes at r > 1.25 mm. The value of the phase-function exponent β found for the agglomerates is smaller than that of the nucleus, consistent with the difference in roughness scales between the two samples. We also observed that the β value decreases with increasing heliocentric distance, indicating a decrease in the median size of the agglomerates detected in the coma. We used a simplified dynamical model to create synthetic images that reproduce the observations. We solved the inverse problem to find the values characterizing the dust that best reproduce the observed tracks. Using this method we could place only a loose constraint on the density (ρ = 200–800 kg m⁻³), but tighter ones on the initial velocities (v_p ≃ 1 m s⁻¹) and on the chunk radii (several dm). Both the initial velocities and the chunk radii vary with heliocentric distance, consistent with the gaseous production rate and with the observed phase function. Even though the radii obtained from the comparison between the observations and the dynamical model only provide an upper limit, the activity model used here cannot provide the pressure required to lift agglomerates of such sizes. Instead, it is necessary to invoke another source of gas, such as CO2 (Gundlach et al. 2020), in order to explain the ejection of those chunks. We also showed that other dynamical effects, such as solar gravity, may play an important role in determining the dynamics of the agglomerates, principally for the sets taken closer to perihelion, where the combination of the small heliocentric distance with the high spacecraft altitude makes the agglomerates seen by the spacecraft much more susceptible to its effect.
In order to better model the dynamics of the agglomerates, this effect must be taken into account in future works.

ACKNOWLEDGEMENTS

We thank the referee for his constructive suggestions, which significantly helped to improve the quality of this manuscript. We thank Nick Atree, Yuna Kwon, Manuela Lippi, Johannes Markannen, Raphael Marschall and Marius Pfeifer for fruitful discussions. OSIRIS was built by a consortium of the Max-Planck-Institut für Sonnensystemforschung, Göttingen, Germany; the CISAS University of Padova, Italy; the Laboratoire d'Astrophysique de Marseille, France; the Instituto de Astrofísica de Andalucia, CSIC, Granada, Spain; the Research and Scientific Support Department of the European Space Agency, Noordwijk, The Netherlands; the Instituto Nacional de Técnica Aeroespacial, Madrid, Spain; the Universidad Politécnica de Madrid, Spain; the Department of Physics and Astronomy of Uppsala University, Sweden; and the Institut für Datentechnik und Kommunikationsnetze der Technischen Universität Braunschweig, Germany. The support of the national funding agencies of Germany (DLR), France (CNES), Italy (ASI), Spain (MEC), Sweden (SNSB), and the ESA Technical Directorate is gratefully acknowledged. We thank the Rosetta Science Ground Segment at ESAC, the Rosetta Mission Operations Centre at ESOC and the Rosetta Project at ESTEC for their outstanding work enabling the science return of the Rosetta Mission.
This work used the Scientific Compute Cluster at GWDG, the joint data center of the Max Planck Society for the Advancement of Science (MPG) and the University of Göttingen. The authors acknowledge funding by the ERC Starting Grant No. 757390 Comet and Asteroid Re-Shaping through Activity (CAstRA). PL conducted the work in this paper in the framework of the International Max-Planck Research School (IMPRS) for Solar System Science at the University of Göttingen. JA acknowledges funding by the Volkswagen Foundation.

DATA AVAILABILITY

The data underlying this article are available at the Planetary Science Archive of the European Space Agency under https://www.cosmos.esa.int/web/psa/rosetta
diff --git a/FNFKT4oBgHgl3EQfai5n/content/tmp_files/2301.11808v1.pdf.txt b/FNFKT4oBgHgl3EQfai5n/content/tmp_files/2301.11808v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e7f0ca7ba6360b1ab358db758b712ea3d5645c7f
--- /dev/null
+++ b/FNFKT4oBgHgl3EQfai5n/content/tmp_files/2301.11808v1.pdf.txt
@@ -0,0 +1,3517 @@
arXiv:2301.11808v1 [math.ST] 27 Jan 2023

Optimal Rate for Parameter Estimation in Matrix-variate Deviated Models

Nhat Ho†  Dat Do⋄  Huy Nguyen†  Khai Nguyen†
University of Texas, Austin†; University of Michigan, Ann Arbor⋄

January 30, 2023

Abstract

We study maximum likelihood estimation (MLE) in the matrix-variate deviated model, where the data are generated from the density function (1 − λ∗)h0(x) + λ∗f(x|µ∗, Σ∗), in which h0 is a known function while λ∗ ∈ [0, 1] and (µ∗, Σ∗) are unknown parameters to estimate.
The main challenges in deriving the convergence rate of the MLE come from two issues: (1) the interaction between the function h0 and the density function f; (2) the deviated proportion λ∗ can go to the extreme points of [0, 1] as the sample size goes to infinity. To address these challenges, we develop a distinguishability condition to capture the linear independence relation between the function h0 and the density function f. We then provide comprehensive convergence rates of the MLE via the vanishing rate of λ∗ to 0 as well as the distinguishability of h0 and f.

1 Introduction

The goodness-of-fit test [9] is one of the foundational tools in statistics, with several applications in data-driven scientific fields, namely kernel Stein discrepancy [22, 26], point processes [31], and Bayesian statistics [27]. Given a sample set of data and a pre-specified distribution with density function h0, the test indicates whether the samples are reasonably distributed according to h0 (null hypothesis) or to another family of distributions {p(·|θ) : θ ∈ Θ} (alternative hypothesis). It is worth noting that knowledge about the null hypothesis distribution can come from the prior knowledge of scientists. A key to understanding the statistical efficiency of testing is the likelihood ratio and the maximum likelihood estimation (MLE) methods [4].

While traditional testing problems often assume that the null distribution h0 = p(·|θ0) and the alternative one p(·|θ) come from a single simple family of distributions, such as exponential families, it is also necessary to comprehend the statistical properties of a testing problem in which the alternative f(·|θ) deviates from h0 by a distribution from a potentially different family. Specifically, in this paper, we consider the family of distributions named the matrix-variate deviated model, with density functions defined as follows:

    pG(x) := (1 − λ)h0(x) + λf(x|µ, Σ),    (1)

where G := (λ, µ, Σ) are the model's parameters, with λ ∈ [0, 1] being the deviated proportion (from h0) and (µ, Σ) ∈ Θ × Ω being the parameters of a vector-matrix family of distributions f, where Θ ⊂ R^{d1} and Ω ⊂ R^{d2×d2} are compact. When λ = 0, this recovers the null hypothesis distribution h0. As the core of the hypothesis testing problem is studied via the MLE of our model under the alternative hypothesis, we directly investigate the behavior of the MLE of the deviated model in this paper.

Problem setting. Suppose that we observe n i.i.d. samples X_1, . . . , X_n from the true matrix-variate deviated model:

    pG∗(x) := (1 − λ∗)h0(x) + λ∗f(x|µ∗, Σ∗),    (2)

where G∗ = (λ∗, µ∗, Σ∗) are unknown parameters with λ∗ ≠ 0. Throughout the paper, we allow G∗ to change with the sample size n. To facilitate our presentation, we suppress the dependence of G∗ on n, and then estimate G∗ from the data. The main focus of this paper is to establish both the convergence rate and the minimax rate for parameter estimation via the MLE approach, which is given by:

    \hat{G}_n \in \arg\max_{G \in \Xi} \sum_{i=1}^{n} \log p_G(X_i),    (3)

where \hat{G}_n = (\hat{λ}_n, \hat{µ}_n, \hat{Σ}_n) and Ξ := [0, 1] × Θ × Ω.

Contribution. There are two main challenges in studying the convergence rate of the MLE \hat{G}_n: (1) the interaction between the function h0 and the density function f, e.g., when h0 is a mixture of densities from the family f and (µ∗, Σ∗) approaches one of the components of h0 as the sample size n goes to infinity; (2) the deviated proportion λ∗ can go to the extreme points of [0, 1] as the sample size goes to infinity.
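To make the deviated model (1) and the MLE objective (3) concrete before summarizing the contributions, the following is a minimal simulation sketch; it is not part of the paper's analysis. It assumes h0 = N(0, I_2), restricts f to Gaussian densities with diagonal covariance, and replaces the abstract maximization in (3) with a box-constrained quasi-Newton solver; the helper names sample_deviated and fit_deviated_mle are hypothetical. Note that with Gaussian h0 and Gaussian f this toy example in fact sits in the non-distinguishable regime discussed below.

```python
# Sketch of the deviated model (1) and a numerical stand-in for the MLE in (3),
# assuming h0 = N(0, I_2) and f restricted to diagonal-covariance Gaussians.
import numpy as np
from scipy.optimize import minimize
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
d = 2
h0 = multivariate_normal(mean=np.zeros(d), cov=np.eye(d))

def sample_deviated(n, lam, mu, cov):
    """Draw n i.i.d. samples from (1 - lam) * h0 + lam * N(mu, cov)."""
    from_f = rng.random(n) < lam
    x = h0.rvs(size=n, random_state=rng)
    x[from_f] = rng.multivariate_normal(mu, cov, size=from_f.sum())
    return x

def fit_deviated_mle(x):
    """Maximize the log-likelihood over (lam, mu, diag covariance) with L-BFGS-B."""
    def neg_loglik(theta):
        lam, mu, log_var = theta[0], theta[1:1 + d], theta[1 + d:]
        f = multivariate_normal(mean=mu, cov=np.diag(np.exp(log_var)))
        mix = (1.0 - lam) * h0.pdf(x) + lam * f.pdf(x)
        return -np.sum(np.log(mix + 1e-300))
    theta0 = np.concatenate([[0.5], x.mean(axis=0), np.zeros(d)])
    bounds = [(1e-6, 1.0)] + [(-10, 10)] * d + [(-5, 5)] * d
    res = minimize(neg_loglik, theta0, method="L-BFGS-B", bounds=bounds)
    lam, mu, log_var = res.x[0], res.x[1:1 + d], res.x[1 + d:]
    return lam, mu, np.diag(np.exp(log_var))

x = sample_deviated(n=5000, lam=0.3, mu=np.array([1.5, -1.0]), cov=np.diag([0.5, 2.0]))
lam_hat, mu_hat, cov_hat = fit_deviated_mle(x)
print(lam_hat, mu_hat, np.diag(cov_hat))
```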
To address these issues, we first develop the distinguishability condition to capture the linear independence relation between the function h0 and the density function f. We then study the optimal convergence rate of parameters under both distinguishable and non-distinguishable settings of the matrix-variate deviated model. Our theoretical results can be summarized as follows:

1. Distinguishable settings: We demonstrate that as long as the function h0 and the density function f are distinguishable, the convergence rate of \hat{λ}_n to λ∗ is O(n^{-1/2}), while the convergence rate of (\hat{µ}_n, \hat{Σ}_n) to (µ∗, Σ∗) is determined by the rate at which λ∗ goes to 0, as follows: λ∗∥(\hat{µ}_n, \hat{Σ}_n) − (µ∗, Σ∗)∥ = O(n^{-1/2}). It indicates that if λ∗ goes to 0 at a rate slower than n^{-1/2}, the convergence rate of estimating (µ∗, Σ∗) is slower than the parametric rate.

2. Non-distinguishable settings: When h0 and f are not distinguishable, the convergence rates of the MLE become complicated to characterize. To shed light on some of the behaviors of the MLE under the non-distinguishable settings of the matrix-variate deviated model, we specifically study the simple setting when h0 belongs to the same family as f, namely, h0(·) = f(·|µ0, Σ0) for some (µ0, Σ0). To precisely characterize the rates of the MLE under this setting, we consider the second-order strong identifiability of f, which requires the linear independence of the derivatives of f with respect to its parameters up to the second order. The second-order identifiability has also been considered in the literature to investigate the convergence rate of parameter estimation in finite mixtures [8, 24, 17, 16, 15, 14, 23].

• Strongly identifiable and non-distinguishable settings: When f is strongly identifiable in the second order, we demonstrate that ∥(∆µ∗, ∆Σ∗)∥^2 |\hat{λ}_n − λ∗| = O(n^{-1/2}) and λ∗∥(∆µ∗, ∆Σ∗)∥ ∥(\hat{µ}_n, \hat{Σ}_n) − (µ∗, Σ∗)∥ = O(n^{-1/2}), where ∆µ = µ − µ0 and ∆Σ = Σ − Σ0. It indicates that the convergence rate of \hat{λ}_n to λ∗ depends on that of (µ∗, Σ∗) to (µ0, Σ0), while the convergence rate of (\hat{µ}_n, \hat{Σ}_n) to (µ∗, Σ∗) depends on both the rate of λ∗ to 0 and the rate of (µ∗, Σ∗) to (µ0, Σ0). These results are strictly different from those in the distinguishable settings, which is mainly due to the non-distinguishability between h0 and f.

• Weakly identifiable and non-distinguishable settings: When f is weakly identifiable, i.e., it is not strongly identifiable in the second order, we specifically consider the popular setting when f is the density of a multivariate Gaussian distribution. The loss of the strong identifiability of the Gaussian distribution is due to the partial differential equation (PDE) between the location and scale parameters (cf. equation (9)). Due to that PDE, the convergence rate of the MLE under that setting exhibits very different behaviors from those under the strongly identifiable setting. In particular, we prove that (∥∆µ∗∥^8 + ∥∆Σ∗∥^4)|\hat{λ}_n − λ∗|^2 = O(n^{-1}) and (λ∗)^2(∥∆µ∗∥^4 + ∥∆Σ∗∥^2)(∥\hat{µ}_n − µ∗∥^4 + ∥\hat{Σ}_n − Σ∗∥^2) = O(n^{-1}). Notably, there is a mismatch between the orders of the convergence rates of the location and covariance parameters. Furthermore, the rate of the deviated mixing proportion also depends on different orders of µ∗ to µ0 and Σ∗ to Σ0. Such rich behaviors of the MLE are mainly due to the PDE between the location and scale parameters.

Related work. When f is the density of a location Gaussian distribution, the convergence rate of parameter estimation in the deviated model had been studied in the work of [12].
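As a rough empirical companion to the rates summarized in the contributions above (again only a sketch, not part of the paper), one can track the error of \hat{λ}_n over a grid of sample sizes and check that \sqrt{n} · |\hat{λ}_n − λ∗| stays roughly flat, which is what the parametric rate suggests when λ∗ is bounded away from 0 and (µ∗, Σ∗) is well separated from the parameters of h0. The snippet reuses the hypothetical sample_deviated and fit_deviated_mle helpers from the previous listing.

```python
# Rough Monte Carlo check of the n^{-1/2} rate for the deviated proportion
# (illustrative only; reuses sample_deviated / fit_deviated_mle defined above).
import numpy as np

lam_true, mu_true, cov_true = 0.3, np.array([1.5, -1.0]), np.diag([0.5, 2.0])
sample_sizes = [500, 2000, 8000, 32000]
n_reps = 20

for n in sample_sizes:
    errs = []
    for _ in range(n_reps):
        x = sample_deviated(n, lam_true, mu_true, cov_true)
        lam_hat, _, _ = fit_deviated_mle(x)
        errs.append(abs(lam_hat - lam_true))
    # Under an n^{-1/2} rate, sqrt(n) * mean error should stay roughly constant in n.
    print(n, np.sqrt(n) * np.mean(errs))
```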
Since the location Gaussian distribution is a special case of strongly identifiable distributions, our result in the strongly identifiable and non-distinguishable settings is a generalization of the results in [12], but with a different proof technique, as their proof technique relies strictly on the properties of the location Gaussian distribution.

The hypothesis testing problem related to the matrix-variate deviated model had been considered in previous work, including the problem of detecting sparse homogeneous and heteroscedastic mixtures [11, 2, 1, 3, 29], the problem of determining the number of components [6, 21, 7, 18, 20], and the problem of multiple testing [25, 10].

Notations. For any a, b ∈ R, we denote a ∨ b = max{a, b} and a ∧ b = min{a, b}. Next, we say that h0 is identical to f if h0(x) = f(x|µ0, Σ0) for some (µ0, Σ0) ∈ Θ × Ω. For each parameter G ∈ Ξ, let E_{pG} be the expectation taken with respect to the product measure with density pG. Lastly, for any two density functions p and q (with respect to the Lebesgue measure m), the total variation distance is given by V(p, q) := (1/2) ∫ |p(x) − q(x)| dm(x), while the squared Hellinger distance is defined as h^2(p, q) := (1/2) ∫ [√p(x) − √q(x)]^2 dm(x).

2 Preliminary

2.1 Identifiability Condition

Our principal goal in this paper is to assess the statistical efficiency of parameter estimation from the MLE method. To do that, we should be able to guarantee the parameter identifiability of the deviated model (2), i.e., if pG(x) = pG∗(x) for almost surely x ∈ X, where G = (λ, µ, Σ), then G ≡ G∗. That identifiability condition leads to the following notion of distinguishability between the density function h0(·) and the family of density functions {f(·|µ, Σ) : (µ, Σ) ∈ Θ × Ω}.

Definition 1. We say that the family of density functions {f(·|µ, Σ) : (µ, Σ) ∈ Θ × Ω} (or, in short, f) is distinguishable from h0 if the following holds:

A1. For any two distinct components (µ1, Σ1) and (µ2, Σ2), if we have real coefficients ηi for
+(c) When f is identical to h0, then f is not distinguishable from h0. +2.2 +Convergence Rate of Density Estimation +Our strategy to obtain the convergence rate of the MLE �Gn is by first establishing the convergence +rate of density p � +Gn and then studying the geometric inequalities between the parameters’ space and +densities’ space. For the former, the standard method is to use the empirical process theory [13, 28]. +For the latter step, we will investigate those inequalities for various settings of distinguishability in +Section 3. +Now we proceed to describe the convergence rate of density estimation under the Hellinger distance +and give a general result for the matrix-variate deviated model. +This convergence rate can be +deduced from the complexity of the set: +P +1/2 +k +(Ξ, ǫ) = +� +¯p1/2 +G +: G ∈ Ξ, h(¯pG, pG∗) ≤ ǫ +� +, +(5) +4 + +where for any G ∈ Ξ, we denote ¯pG := (pG + pG∗)/2. We measure the complexity of this class +through the bracketing entropy integral +JB(ǫ, P +1/2 +k +(Ξ, ǫ)) = +� ǫ +ǫ2/213 H1/2 +B (u, P +1/2 +k +(Ξ, ǫ))du ∨ ǫ, +(6) +where HB(ǫ, P) denotes the ǫ-bracketing entropy number of a metric space P. +We require the +following assumption. +A2. Given a universal constant J > 0, there exists N > 0, possibly depending on Θ and k, such +that for all n ≥ N and all ǫ > (log n/n)1/2, +JB(ǫ, P +1/2 +k +(Ξ, ǫ)) ≤ J√nǫ2. +Theorem 1. Assume that Assumption A2 holds, and let k ≥ 1. Then, there exists a constant C > 0 +depending only on Θ and k such that for all n ≥ 1, +sup +G∗∈Ξ +EpG∗h(p � +Gn, pG∗) ≤ C +� +log n/n. +Therefore, in order to get the convergence rate for density estimators based on the MLE method, +we only need to check Assumption A2, which holds true for several parametric models [28]. For our +model, we give an example that it holds for a general class of f and h0. +Proposition 1. Let f be a location-scale Gaussian density function with parameters (µ, Σ) ∈ Θ×Ω. +Suppose that there exist positive constants a, ℓ, u such that Θ = [−a, a]d and eigenvalues of Σ are +bounded in [ℓ, u] for any Σ ∈ Ω. Additionally, we assume that the function h0 is bounded with tail +− log h0(x) ≳ ∥x∥q for some q > 0. Then, the corresponding matrix-variate deviated model defined +in equation (1) satisfies assumption A2. +3 +From the Convergence Rate of Densities to Rate of Parameters +The objective of this section is to develop a general theory according to which a small distance +between pG and pG∗ under the Hellinger distance (or Total Variation distance) would imply that +G and G∗ is also close under appropriate distance where G = (λ, µ, Σ) and G∗ = (λ∗, µ∗, Σ∗). +By combining those results with Theorem 1, we can obtain the convergence rate for parameter +estimation (cf. Section 4). The distinguishability condition between h0 and f implicitly requires +that pG = pG∗ would entail G = G∗; however, to obtain quantitative bounds for their Total +Variation distance, we will need stronger notions of both distinguishability and classical parameter +identifiability, ones which involve higher order derivatives of the densities h0 and f, taken with +respect to mixture model parameters. Throughout the rest of this section, we denote G = (λ, µ, Σ) +and G∗ = (λ∗, µ∗, Σ∗). +3.1 +Distinguishable Settings +Definition 2. 
We say that f is distinguishable from h0 up to the first order if f is first time +differentiable in (µ, Σ), and the following holds: +5 + +(D.1) For any component (µ′, Σ′) ∈ Θ×Ω, if we have real coefficients η, τα for all α = (α1, α2), α1 ∈ +Nd1, α2 ∈ Nd2×d2, |α| = |α1|1 + |α2| ≤ 1 such that +ηh0(x) + +� +|α|≤1 +τα +∂|α|f +∂µα1∂Σα2 (x|µ′, Σ′) = 0 +for all x ∈ X, then η = τα = 0 for all |α| ≤ 1. +We can verify that the examples from part (a) and part (b) of Example 1 satisfy the first-order +distinguishability condition. +Next, we introduce a notion of uniform Lipschitz condition in the +following definition. +Definition 3 (Uniform Lipschitz). We say that f admits uniform Lipschitz condition up to the +first order if the following holds: there are positive constants δ1, δ2 such that for any R1, R2, R3 > 0, +γ1 ∈ Rd1, γ2 ∈ Rd2×d2, R1 ≤ λ1/2 +min(Σ1) ≤ λ1/2 +max(Σ2) ≤ R2, ∥µ∥ ≤ R3, µ1, µ2 ∈ Θ, Σ1, Σ2 ∈ Ω, we +can find positive constants C(R1, R2) and C(R3) such that for all x ∈ X, +����γ⊤ +1 +�∂f +∂µ(x|µ1, Σ) − ∂f +∂µ(x|µ2, Σ) +����� ≤ C(R1, R2)∥µ1 − µ2∥δ1∥γ1∥, +����tr +�� ∂f +∂Σ(x|µ, Σ1) − ∂f +∂Σ(x|µ, Σ2) +�⊤ +γ2 +����� ≤ C(R3)∥Σ1 − Σ2∥δ2∥γ2∥. +Now, equipped with the strong distinguishability in the first order and uniform Lipschitz conditions, +we have the following results characterizing the behavior of V (pG, pG∗) regarding the variation of G +and G∗. +Theorem 2. Assume that f is distinguishable from h0 up to the first order. Furthermore, f admits +uniform Lipschitz condition up to the first order. For any G and G∗, we define +K(G, G∗) := |λ − λ∗| + (λ + λ∗)∥(µ, Σ) − (µ∗, Σ∗)∥. +Then, the following holds: +C.K(G, G∗) ≤ V (pG, pG∗) ≤ C1.K(G, G∗), +for all G and G∗ where C and C1 are two positive constants depending only on Θ, Ω, and h0. +See Appendix A.1 for the proof of Theorem 2. Since the MLE approach yields the convergence rate +n−1/2 up to some logarithmic factor for pG∗ under the first order uniform Lipschitz condition of f, +the result of Theorem 2 directly yields the convergence rate n−1/2 up to some logarithmic factor +for G∗ under metric K. This entails that the estimation of weight λ∗ converges at rate n−1/2 up to +some logarithmic factor while the convergence rate of estimating (µ∗, Σ∗) is typically much slower +than n−1/2 as it depends on the rate of convergence of λ∗ to 0 (cf. Theorem 5). +3.2 +Non-distinguishable Settings +When f is not distinguishable to h0 up to the first order, the bound in Theorem 2 may not hold +in general. In this section, we investigate the inverse bounds under the specific settings of non- +distinguishable in the first-order models when h0 belongs to the family f(·|µ, Σ), i.e., h0(x) = +6 + +f(x|µ0, Σ0) for some (µ0, Σ0) ∈ Θ × Σ. Our studies are divided into two separate regimes of f: +the first setting is when f is strongly identifiable in the second order (cf. Definition 4), while the +second setting is when it is not. For the simplicity of the presentation in the paper, we define +(∆µ, ∆Σ) = (µ − µ0, Σ − Σ0) for any element (µ, Σ) ∈ Θ × Ω. +Definition 4 (Strong Identifiability). We say that f is strongly identifiable in the second order +if f is twice differentiable in (µ, Σ) and the following holds: +(D.2) For any positive integer k, given k distinct pairs (µ1, Σ1), . . . , (µk, Σk), if we have α(i) +η +such +that +2 +� +ℓ=0 +� +|η|=ℓ +k +� +i=1 +α(i) +η +∂|η|f +∂µη1∂Ση2 (x|µi, Σi) = 0, +for almost all x ∈ X, then α(i) +η += 0 for all i ∈ [k] and |η| ≤ 2. 
+3.2.1 +Strongly Identifiable Settings +Now, we have the following result regarding the lower bound of V (pG, pG∗) under the strongly +identifiable settings of f. +Theorem 3. Assume that h0(x) = f(x|µ0, Σ0) for some (µ0, Σ0) ∈ Θ × Σ and f is strongly +identifiable in the second order and admits uniform Lipschitz condition up to the second order. +Furthermore, we denote +D(G, G∗) := λ∥(∆µ, ∆Σ)∥2 + λ∗∥(∆µ∗, ∆Σ∗)∥2 − min {λ, λ∗} +� +∥(∆µ, ∆Σ)∥2∥(∆µ∗, ∆Σ∗)∥2� ++ +� +λ∥(∆µ, ∆Σ)∥ + λ∗∥(∆µ∗, ∆Σ∗)∥ +� +∥(µ, Σ) − (µ∗, Σ∗)∥, +for any G and G∗. Then, there exists a positive constant C depending only on Θ, Ω, and (µ0, Σ0) +such that +V (pG, pG∗) +≥ +C.D(G, G∗), +for all G and G∗. +The proof of Theorem 3 is in Appendix A.2. Several remarks regarding Theorem 3 are in order: +(i) For any G and G∗, by defining +D(G, G∗) := |λ∗ − λ|∥(∆µ, ∆Σ)∥∥(∆µ∗, ∆Σ∗)∥ ++ ∥(µ, Σ) − (µ∗, Σ∗)∥ +� +λ∥(∆µ, ∆Σ)∥ + λ∗∥(∆µ∗, ∆Σ∗)∥ +� +, +we can verify that 1/2 ≤ D(G, G∗)/D(G, G∗) ≤ 2, i.e., D(G, G∗) ≍ D(G, G∗). The reason that we +prefer to use the formation of D(G, G∗) over that of D(G, G∗) is not only due to the convenience of +the proof argument of Theorem 3 later in Appendix A but also due to its partial connection with +Wasserstein metric that we are going to discuss in the next remark. +7 + +(ii) When f is a multivariate location family and is identical to h0, i.e., µ0 = 0, it was demonstrated +recently in [12] that +V (pG, pG∗) ≳ |λ − λ∗|∥µ∥∥µ∗∥ + (λ∗∥µ∗∥ + λ∥µ∥)∥µ − µ∗∥, +(7) +which is also the key result for establishing the convergence rates of parameter estimation in their +work. However, their proof technique only works for the location family and it is unclear what is +the sufficient condition for the family of density functions beyond the location family such that the +inequality (7) will hold. As the location family is strongly identifiable in the second order, we can +verify that the lower bound in Theorem 3 and inequality (7) are in fact similar. Therefore, the +result in Theorem 2 gives a generalization of inequality (7) in [12] under the strongly identifiable in +the second order setting of f. +(iii) As being indicated in [12], we can further lower bound the right hand side of inequality (7) in +terms of the second order Wasserstein metric W2 [30] between G and G∗ when we present G and +G∗ as two discrete probability measures with two components. In particular, with an abuse of the +notations we denote that G = (1 − λ)δ(µ0,Σ0) + λδ(µ,Σ) and G∗ = (1 − λ)δ(µ0,Σ0) + λδ(µ∗,Σ∗), i.e., we +think of G and G∗ as two mixing measures with one fixed atom to be (µ0, Σ0). In light of Lemma +1 in Appendix C, we have +W 2 +2 (G, G∗) ≍ λ∥(∆µ, ∆Σ)∥2 + λ∗∥(∆µ∗, ∆Σ∗)∥2 +− min {λ, λ∗} +� +∥(∆µ, ∆Σ)∥2 + ∥(∆µ∗, ∆Σ∗)∥2 +� ++ min {λ, λ∗} ∥∥(µ, Σ) − (µ∗, Σ∗)∥2. +Therefore, D(G, G∗) and W 2 +2 (G, G∗) share the similar term λ∥(∆µ, ∆Σ)∥2 + λ∗∥(∆µ∗, ∆Σ∗)∥2 − +min {λ, λ∗} +� +∥(∆µ, ∆Σ)∥2 + ∥(∆µ∗, ∆Σ∗)∥2 +� +in their formulations. However, as λ∥(∆µ, ∆Σ)∥ + +λ∗∥(∆µ∗, ∆Σ∗)∥ ≥ min {λ, λ∗} ∥∥(µ, Σ) − (µ∗, Σ∗)∥, the remaining term in D(G, G∗) is stronger +than that of W 2 +2 (G, G∗). Moreover, as λ = λ∗, we further obtain that +D(G, G∗) +W 2 +2 (G, G∗) ≍ ∥(∆µ, ∆Σ)∥ + ∥(∆µ∗, ∆Σ∗)∥ +∥(µ, Σ) − (µ∗, Σ∗)∥ +. +Hence, as long as the right hand side term in the above display goes to ∞, i.e., ||(∆µ + ∆µ∗, ∆Σ + +∆Σ∗)|| → 0, we will have D(G, G∗)/W 2 +2 (G, G∗) → ∞. This strong refinement of Wasserstein metric +is possible due to the special structure of G and G∗ as one of their components is always fixed to +be (µ0, Σ0). 
+(iv) Under the setting when G∗ is varied, d1 = 1, and d2 = 0, by means of Fatou’s lemma the result +from Theorem 4.6 in [14] yields the following bound +V (pG, pG∗) ≥ C′.W 3 +3 (G, G∗), +(8) +if the kernel density function f is 4-strongly identifiable (cf. Definition 2.2 in [14]) and satisfies +uniform Lipschitz condition up to the fourth order where C′ is some positive constant depending +only on G and G∗. Since D(G, G∗) ≳ W 2 +2 (G, G∗) ≥ W 2 +1 (G, G∗) ≳ W 3 +3 (G, G∗), it indicates that the +bound in Theorem 3 is much tighter than that in equation (8). The loss of efficiency in equation (8) +is again due to the special structures of G and G∗ as one of their components is always fixed to be +8 + +(µ0, Σ0). +(v) When G∗ is fixed such that (µ∗, Σ∗) ̸= (µ0, Σ0), we can verify that +C1(G∗)K(G, G∗) ≤ V (pG, pG∗) ≤ C2(G∗)K(G, G∗) +where C1(G∗) and C2(G∗) are some positive constants depending only on G∗, Θ, Ω, and (µ0, Σ0). +Since D(G, G∗) ≲ K(G, G∗), the weaker bound of V (pG, pG∗) in Theorem 3 can be interpreted as a +compensation for the variation of G∗ around (0, (µ0, Σ0)) within the space (0, 1)×(Θ×Ω\ {(µ0, Σ0)}). +Unlike the convergence rate results from the strongly distinguishable in the first order setting be- +tween f and h0 in Theorem 2, the convergence rate of λ∗ under the setting of Theorem 3 will depend +on the rate of convergence of ∥(∆µ∗, ∆Σ∗)∥2 to 0 (cf. Theorem 6). Additionally, the convergence +rate of estimating (µ∗, Σ∗) will be determined based on the convergence rates of λ∗ and (∆µ∗, ∆Σ∗) +to 0. +3.2.2 +Weakly Identifiable Settings +Thus far, as h0 belongs to the family f, our results regarding the lower bounds between pG and +pG0 under Total Variation distance rely on the strongly identifiable in the second order assumption +of kernel f. However, there are various families of density functions that do not satisfy such an +assumption, which we refer to as the weakly identifiable condition. To illustrate the non-uniform +natures of V (pG, pG∗) under the weakly identifiable condition of f, we consider specifically a popular +setting of f in this section: multivariate location-covariance Gaussian kernel. +Location-covariance multivariate Gaussian kernel: As indicated in the previous work in the +literature [5, 19, 16], if f is a family of multivariate location-covariance Gaussian distributions in d +dimension, it exhibits the following partial differential equation (PDE) with respect to the location +and covariance parameter +∂2f +∂µ2 (x|µ, Σ) = 2 ∂f +∂Σ(x|µ, Σ), +(9) +for any x ∈ Rd and (µ, Σ) ∈ Θ×Ω. We can verify that this structure leads to the loss of the second- +order strong identifiability condition of the Gaussian kernel. Note that, the PDE structure of the +Gaussian kernel has been shown to lead to very slow convergence rates of parameter estimation +under general over-fitted Gaussian mixture models (cf. Theorem 1.1 in [16]). For the setting of +the matrix-variate deviated model, since the parameters λ∗ and (µ∗, Σ∗) are allowed to vary with +the sample size, we may expect that the estimation of these parameters will also suffer from the +very slow rate. In fact, we achieve the following lower bound of V (pG, pG∗) under the multivariate +location-covariance Gaussian kernel. +Theorem 4. Assume that h0(x) = f(x|µ0, Σ0) for some (µ0, Σ0) ∈ Θ × Σ and f is a family of +multivariate location-covariance Gaussian distributions. 
We denote +Q(G, G∗) := λ(∥∆µ∥4 + ∥∆Σ∥2) + λ∗(∥∆µ∗∥4 + ∥∆Σ∗∥2) +− min {λ, λ∗} +� +∥∆µ∥4 + ∥∆Σ∥2 + ∥∆µ∗∥4 + ∥∆Σ∗∥2 +� ++ +� +λ(∥∆µ∥2 + ∥∆Σ∥) + λ∗(∥∆µ∗∥2 + ∥∆Σ∗∥) +� +× +� +∥µ − µ∗∥2 + ∥Σ − Σ∗∥ +� +, +9 + +for any G and G∗. Then, we can find a positive constant C depending only on Θ, Ω, and (µ0, Σ0) +such that +V (pG, pG∗) ≥ C.Q(G, G∗), +for any G and G∗. +See Appendix A.3 for the proof of Theorem 4. A few comments with Theorem 4 are in order. +(i) Different from the formulation of D(G, G∗) in Theorem 3 where we have the same power between +µ and Σ, there is a mismatch of power between ∥∆µ∥2, ∥∆µ∗∥2 and ∥∆Σ∥, ∥∆Σ∗∥ in the formulation +of Q(G, G∗). This interesting phenomenon is mainly due to the structure of the partial differential +equation (9) where the second-order derivative of the location parameter and first-order derivative +of the covariance parameter is linearly dependent. +(ii) If we denote +Q′(G, G∗) := λ(∥∆µ∥4 + ∥∆Σ∥2) + λ∗(∥∆µ∗∥4 + ∥∆Σ∗∥2) +− min {λ, λ∗} +� +∥∆µ∥4 + ∥∆Σ∥2 + ∥∆µ∗∥4 + ∥∆Σ∗∥2 +� ++ min {λ, λ∗} +� +∥µ − µ∗∥4 + ∥Σ − Σ∗∥2 +� +, +then we can verify that Q(G, G∗) ≳ Q′(G, G∗) for any G, G∗. +If we treat G and G∗ as two- +components measures as in the remark (iii) after Theorem 3, we would have +Q′(G, G∗) ≍ W 4 +4 (G1, G1,∗) + W 2 +2 (G2, G2,∗), +(10) +where G1 = (1 − λ)δµ′ +0 + λδµ, G2 = (1 − λ)δΣ′ +0 + λδΣ and similarly for G1,∗ and G2,∗. +Here, +(µ0, Σ0) = (µ′ +0, Σ′ +0), and W2, W4 are respectively second and fourth order Wasserstein metrics. The +formulations of Q′(G, G∗), therefore, can be thought as a combination of two Wasserstein metrics: +one is with only parameter µ and another one is only with parameter Σ. The division into two +Wasserstein metrics can be traced back again to the PDE structure in equation (9). +If λ = λ∗ and (∥∆µ∥2 +∥∆µ∗∥2 +∥∆Σ∥+∥∆Σ∗∥)/ +� +∥µ−µ∗∥2 +∥Σ−Σ∗∥ +� +→ ∞, we will have that +Q(G, G∗)/Q′(G, G∗) → ∞. It proves that the result from Theorem 4 under multivariate setting of +Gaussian kernel is a strong refinement of the summation of Wasserstein metrics regarding location +and covariance parameter in equation (10). +(iii) Similar to the comments after Theorem 3, as G∗ is fixed and (µ∗, Σ∗) ̸= (µ0, Σ0) we also can +verify that +C1(G∗)K(G, G∗) ≤ V (pG, pG∗) ≤ C2(G∗)K(G, G∗) +where C1(G∗) and C2(G∗) are some positive constants depending only on G∗, Θ, Ω, and (µ0, Σ0). +When G∗ is varied, as long as G∗ does not converge to (0, (µ0, Σ0)), then we will still have +inf +G∗ C(G∗) > 0, i.e., metric K is still sufficient to capture the variation of pG around pG∗ under +10 + +L2 norm. However, when G∗ indeed converges to (0, (µ0, Σ0)), our result in Theorem 4 implies that +a much stronger compensation of efficiency is needed to capture the variation of pG around pG∗ +under Total Variation distance. +A consequence of Theorem 4 is that the convergence rate of estimating λ∗ is now determined by +∥∆µ∗∥4+∥∆Σ∗∥2, instead of ∥(∆µ∗, ∆Σ∗)∥2 as in the strongly identifiable in the second order setting +of f. Furthermore, we also encounter a phenomenon that the rate of convergence of estimating Σ∗ +is much faster than that of estimating µ∗. In particular, estimating Σ∗ depends on the rate in which +λ∗(∥µ∗∥2 + ∥Σ∗∥) converges to 0 while estimating µ∗ relies on square root of this rate (cf. Theorem +7). +4 +Minimax Lower Bounds and Convergence Rates of Parameter +Estimation +In this section, we study the convergence rates of MLE �Gn as well as minimax lower bounds of +estimating G∗ under various settings of h0 and f. 
Firstly, we start with the distinguishable regime +of h0 and f. +Theorem 5. (Distinguishable settings) Assume that classes of densities h0 and f satisfy the +conditions in Theorem 2. Then, we achieve that +(a) (Minimax lower bound) Assume that f satisfies the following assumption S.1: +(S.1) +sup +∥(µ,Σ)−(µ′,Σ′)∥≤c0 +� +� +∂|α|f(x|µ,Σ) +∂µα1∂Σα2 +�2 +f(x|µ′, Σ′) +dx < ∞ for some sufficiently small c0 > 0, where α1 ∈ +Nd1, α2 ∈ Nd2 in the partial derivative of f take any combination such that |α| = |α1| + |α2| ≤ 1. +Then for any r < 1, there exist two universal positive constants c1 and c2 such that +inf +� +Gn∈Ξ +sup +G∈Ξ +EpG +� +λ2∥(�µn, �Σn) − (µ, Σ)∥2 +� +≥ c1n−1/r, +inf +� +Gn∈Ξ +sup +G∈Ξ +EpG +� +|�λn − λ|2 +� +≥ c2n−1/r. +Here, the infimum is taken over all sequences of estimates �Gn = (�λn, �µn, �Σn). +(b) (MLE rate) Let �Gn be the MLE defined in equation (3), and the family {pG : G ∈ Ξ} satisfies +condition A2. Then, we have the convergence rate for the MLE: +sup +G∗∈Ξ +EpG∗ +� +(λ∗)2∥(�µn, �Σn) − (µ∗, Σ∗)∥2 +� +≲ log2 n +n +, +sup +G∗∈Ξ +EpG∗ +� +|�λn − λ∗|2 +� +≲ log2 n +n +. +Proof of Theorem 5 is in Appendix B.1. The results of Theorem 5 imply that even though we still +can estimate λ∗ at the standard rate n−1/2, the convergence rate of (�µn, �Σn) to (µ∗, Σ∗) strictly +11 + +depends on the vanishing rate of λ∗ to 0. Therefore, the convergence rate of estimating (µ∗, Σ∗) can +be generally slower than n−1/2 as long as λ∗ goes to 0 at a rate slower than n−1/2. Our next result +investigates the behaviors of �Gn in the case h0 is identical to f, and f is strongly identifiable up to +the second order. +Theorem 6. (Strongly identifiable and non-distinguishable settings) Assume that classes +of densities h0 and f satisfy the conditions in Theorem 3. We define +Ξ1(ln) := +� +G = (λ, µ, Σ) ∈ Ξ : +ln +min +1≤i≤d1 +1≤u,v≤d2 +{|(∆µ)i|2, |(∆Σ)uv|2}√n ≤ λ +� +, +for any sequence {ln}. Then, we achieve +(a) (Minimax lower bound) Assume that f satisfies assumption S.1 in Theorem 5. Then for any +r < 1 and sequence {ln}, there exist two universal positive constants c1 and c2 such that +inf +� +Gn∈Ξ +sup +G∈Ξ1(ln) +EpG +� +λ2∥(∆µ, ∆Σ)∥2∥(�µn, �Σn) − (µ, Σ)∥2 +� +≥ c1n−1/r, +inf +� +Gn∈Ξ +sup +G∈Ξ1(ln) +EpG +� +∥∆µ, ∆Σ)∥4|�λn − λ|2 +� +≥ c2n−1/r. +(b) (MLE rate) Let �Gn be the MLE defined in equation (3), and the family {pG : G ∈ Ξ} satisfies +condition A2. Then, for any sequence {ln} such that ln/ log n → ∞, +sup +G∗∈Ξ1(ln) +EpG∗ +� +(λ∗)2∥(∆µ∗, ∆Σ∗)∥2∥(�µn, �Σn) − (µ∗, Σ∗)∥2 +� +≲ log2 n +n +, +sup +G∗∈Ξ1(ln) +EpG∗ +� +∥(∆µ∗, ∆Σ∗)∥4|�λn − λ∗|2 +� +≲ log2 n +n +. +Proof of Theorem 6 is in Appendix B.2. The results of part (b) are the generalization of those in +Theorem 3.1 and Theorem 3.2 in [12] to the setting of strongly identifiable in the second-order kernel. +The condition regarding the lower bound of λ in the formation of Ξ1(ln) is necessary to guarantee +that (�µn, �Σn) and �λn are consistent estimators of (µ∗, Σ∗) and λ∗ respectively. In particular, from +the results in equation (26) of the proof of Theorem 6, we have for any G∗ ∈ Ξ that +EpG∗(λ∗)2∥(∆µ∗, ∆Σ∗)∥2∥(�µn, �Σn) − (µ∗, Σ∗)∥2 ≲ log2 n +n +. +Therefore, for any 1 ≤ i ≤ d1 and 1 ≤ u, v ≤ d2 we get +EpG∗ +��(∆�µn)i +(∆µ∗)i +− 1 +�2� +≲ +log2 n +n(λ∗)2 {(∆µ∗)i}4 , +EpG∗ +��(∆�Σn)uv +(∆Σ∗)uv +− 1 +�2� +≲ +log2 n +n(λ∗)2 {(∆Σ∗)uv}4 . +12 + +It indicates that +log n +√nλ∗ +min +1≤i≤d1,1≤u,v≤d2 {|(∆µ∗)i|2, |(∆Σ∗)i|2} → 0 +for the left-hand-side terms of the above display to go to 0 for all 1 ≤ i ≤ d1 and 1 ≤ u, v ≤ d2. 
+The results of Theorem 6 imply that as long as the kernel functions are strongly identifiable in +the second order, the convergence rates of �µn to µ∗ and �Σn to Σ∗ are similar, which depend on +the vanishing rate of (λ∗)2∥(∆µ∗, ∆Σ∗)∥2 to 0. In our next result of location-covariance multivari- +ate Gaussian distribution, we will demonstrate that such uniform convergence rates of different +parameters no longer hold. +Theorem 7. (Weakly identifiable and non-distinguishable settings) Assume that f is a +family of location-covariance multivariate Gaussian distributions, and h0(x) = f(x|µ0, Σ0) for some +(µ0, Σ0) ∈ Θ × Σ. We define +Ξ2(ln) := +� +G = (λ, µ, Σ) ∈ Ξ : +ln +min +1≤i≤d,1≤u,v≤d {|(∆µ)i|4, |(∆Σ)uv|2} √n ≤ λ +� +, +for any sequence {ln}. Then, the following holds: +(a) (Minimax lower bound) For any r < 1 and sequence {ln}, there exist two universal positive +constants c1 and c2 such that +inf +� +Gn∈Ξ +sup +G∈Ξ2(ln) +EpG +� +λ2 � +∥∆µ∥4 + ∥∆Σ∥2� � +∥�µn − µ∥4 + ∥�Σn − Σ∥2�� +≥ c1n−1/r, +inf +� +Gn∈Ξ +sup +G∈Ξ2(ln) +EpG +� � +∥∆µ∥8 + ∥∆Σ∥4� +|�λ − λ|2 +� +≥ c2n−1/r. +(b) (MLE rate) Let �Gn be the estimator defined in (3). Then, for any sequence {ln} such that +ln/ log n → ∞ the following holds +sup +G∗∈Ξ2(ln) +EpG∗ +� +(λ∗)2 � +∥∆µ∗∥4 + ∥∆Σ∗∥2� � +∥�µn − µ∗∥4 + ∥�Σn − Σ∗∥2�� +≲ log2 n +n +, +sup +G∗∈Ξ2(ln) +EpG∗ +� � +∥∆µ∗∥8 + ∥∆Σ∗∥4� +|�λn − λ∗|2 +� +≲ log2 n +n +. +Proof of Theorem 7 is in Appendix B.3. A few comments are in order: +(i) Similar to the argument after Theorem 6, the condition regarding λ in the formulation of Ξ2(ln) +is to guarantee that (�µn, �Σn) and �λn are consistent estimators of (µ∗, Σ∗) and λ∗, respectively. +(ii) The results of part (b) indicate that the convergence rate of estimating Σ∗ is generally much +faster than that of estimating µ∗ regardless of the circumstance of (λ∗)2 � +∥∆µ∗∥4 + ∥∆Σ∗∥2� +. The +non-uniformity of these convergence rates is mainly due to the structure of the partial differential +13 + +equation in (9) where the second order derivative of the location parameter and the first order +derivative of covariance parameter correlates. +(iii) From the results of part (b), it is clear that when ∥∆µ∗∥+∥∆Σ∗∥ ̸→ 0, i.e., (µ∗, Σ∗) → (µ, Σ) ̸= +(µ0, Σ0), and λ∗ ̸→ 0, the convergence rate of �λn to λ∗ is n−1/2. Furthermore, by using the result +from part (a) of Proposition 4 we can verify that +sup +G∗ +EpG∗ +� +(λ∗)2 � +∥�µn − µ∗∥2 + ∥�Σn − Σ∗∥2�� +≲ log2 n +n +, +where the supremum is taken over {G∗ ∈ Ξ2(ln) : K(G∗, G) ≤ ǫ}, and G = (λ, µ, Σ), λ∗ → λ, and +ǫ is some sufficiently small positive constant. Since λ ̸= 0, we achieve the optimal convergence +rate n−1/2 of estimating (µ∗, Σ∗) within a sufficiently small neighborhood of G under metric K. +These results imply that even though the convergence rate of estimating G∗ may be extremely slow +when G∗ moves over the whole space Ξ2(ln) (global convergence), such convergence rate can be at +standard rate n−1/2 when G∗ moves within a sufficiently small neighborhood of some appropriate +parameters G (local convergence). +As we have seen from the convergence rate results from location-covariance multivariate Gaussian +distributions, the PDE structure (9) plays a key role in the slow convergence rates of location and +covariance parameters as well as the mismatch of orders of these rates. +5 +Conclusion +In this paper, we establish the rate for estimating true parameters in the matrix-covariate deviated +model (2) by using the MLE method. 
+During our derivation, we have to overcome two major +obstacles, which are firstly the interaction between the null hypothesis density h0 and the alternative +density function f, and secondly the likelihood of the deviated proportion λ∗ vanishing to either +endpoints of the interval [0, 1]. To this end, we introduce a notion of distinguishability to control +the linear independent relation between h0 and f, and finally achieve the optimal convergence rate +of the MLE under both distinguishable and non-distinguishable settings. +Acknowledgements +Nhat Ho acknowledges support from the NSF IFML 2019844 and the NSF AI Institute for Founda- +tions of Machine Learning. +References +[1] T. Cai, X. J. Jeng, and J. Jin. Optimal detection of heterogeneous and heteroscedastic mixtures. +Journal of the Royal Statistical Society: Series B (Statistical Methodology), 73(5):629–662, 2011. +(Cited on page 3.) +[2] T. Cai, J. Jin, and M. G. Low. Estimation and confidence sets for sparse normal mixtures. +Annals of Statistics, 35(6):2421–2449, 2007. (Cited on page 3.) +14 + +[3] T. Cai and Y. Wu. Optimal detection of sparse mixtures against a given null distribution. +IEEE Transactions on Information Theory, 60(4):2217 – 2232, 2014. (Cited on page 3.) +[4] G. Casella and R. L. Berger. Statistical inference. Cengage Learning, 2021. (Cited on page 1.) +[5] H. Chen and J. Chen. Tests for homogeneity in normal mixtures in the presence of a structural +parameter. Statistica Sinica, 13:351–365, 2003. (Cited on page 9.) +[6] H. Chen, J. Chen, and J. D. Kalbfleisch. A modified likelihood ratio test for homogeneity in +finite mixture models. Journal of the Royal Statistical Society: Series B (Statistical Methodol- +ogy), 63(1):19–29, 2001. (Cited on page 3.) +[7] J. Chen, P. Li, and Y. Fu. Inference on the order of a normal mixture. Journal of the American +Statistical Association, 107:1096–1105, 2012. (Cited on page 3.) +[8] J. H. Chen. +Optimal rate of convergence for finite mixture models. +Annals of Statistics, +23(1):221–233, 1995. (Cited on pages 2 and 17.) +[9] W. G. Cochran. The χ2 test of goodness of fit. The Annals of mathematical statistics, pages +315–345, 1952. (Cited on page 1.) +[10] N. Deb, S. Saha, A. Guntuboyina, and B. Sen. Two-component mixture model in the presence +of covariates. Journal of the American Statistical Association, 117(540):1820–1834, 2022. (Cited +on page 3.) +[11] D. Donoho and J. Jin. Higher criticism for detecting sparse heterogeneous mixtures. Annals +of Statistics, 32(3):962–994, 2004. (Cited on page 3.) +[12] S. Gadat, J. Kahn, C. Marteau, and C. Maugis-Rabusseau. +Parameter recovery in two- +component contamination mixtures: The l2 strategy. In Annales de l’Institut Henri Poincaré, +Probabilités et Statistiques, volume 56, pages 1391–1418. Institut Henri Poincaré, 2020. (Cited +on pages 3, 8, 12, 34, 35, and 37.) +[13] E. Giné and R. Nickl. +Mathematical foundations of infinite-dimensional statistical models. +Cambridge university press, 2021. (Cited on page 4.) +[14] P. Heinrich and J. Kahn. Strong identifiability and optimal minimax rates for finite mixture +estimation. Annals of Statistics, 46(6A):2844–2870, 2018. (Cited on pages 2, 8, and 17.) +[15] N. Ho and L. Nguyen. Singularity structures and impacts on parameter estimation in finite +mixtures of distributions. SIAM Journal on Mathematics of Data Science, 1(4):730–758, 2019. +(Cited on page 2.) +[16] N. Ho and X. Nguyen. Convergence rates of parameter estimation for some weakly identifiable +finite mixtures. 
Annals of Statistics, 44:2726–2755, 2016. (Cited on pages 2, 9, and 25.) +[17] N. Ho and X. Nguyen. On strong identifiability and convergence rates of parameter estimation +in finite mixtures. Electronic Journal of Statistics, 10:271–307, 2016. (Cited on pages 2 and 17.) +[18] H. Kasahara and K. Shimotsu. Non-parametric identification and estimation of the number +of components in multivariate mixtures. +Journal of the Royal Statistical Society: Series B +(Statistical Methodology), 76(1):97–111, 2014. (Cited on page 3.) +15 + +[19] H. Kasahara and K. Shimotsu. Testing the number of components in normal mixture regression +models. Journal of the American Statistical Association, 2014. (Cited on page 9.) +[20] H. Kasahara and K. Shimotsu. Testing the number of components in normal mixture regression +models. Journal of the American Statistical Association, 110(512):1632–1645, 2015. (Cited on +page 3.) +[21] P. Li and J. Chen. Testing the order of a finite mixture. Journal of the American Statistical +Association, 105(491):1084–1092, 2010. (Cited on page 3.) +[22] Q. Liu, J. Lee, and M. Jordan. A kernelized stein discrepancy for goodness-of-fit tests. In Pro- +ceedings of The 33rd International Conference on Machine Learning, volume 48 of Proceedings +of Machine Learning Research, pages 276–284. PMLR, 20–22 Jun 2016. (Cited on page 1.) +[23] T. Manole and N. Ho. Refined convergence rates for maximum likelihood estimation under finite +mixture models. In Proceedings of the 39th International Conference on Machine Learning, +volume 162 of Proceedings of Machine Learning Research, pages 14979–15006. PMLR, 17–23 +Jul 2022. (Cited on page 2.) +[24] X. Nguyen. Convergence of latent mixing measures in finite and infinite mixture models. Annals +of Statistics, 4(1):370–400, 2013. (Cited on pages 2 and 17.) +[25] R. Patra and B. Sen. +Estimation of a two-component mixture model with applications to +multiple testing. Journal of the Royal Statistical Society: Series B (Statistical Methodology), +78(4):869–893, 2016. (Cited on page 3.) +[26] A. Schrab, B. Guedj, and A. Gretton. KSD aggregated goodness-of-fit test. In A. H. Oh, +A. Agarwal, D. Belgrave, and K. Cho, editors, Advances in Neural Information Processing +Systems, 2022. (Cited on page 1.) +[27] S. Talts, M. Betancourt, D. Simpson, A. Vehtari, and A. Gelman. Validating bayesian inference +algorithms with simulation-based calibration, 2018. (Cited on page 1.) +[28] S. van de Geer. Empirical Processes in M-estimation, volume 6. Cambridge university press, +2000. (Cited on pages 4 and 5.) +[29] N. Verzelen and E. Arias-Castro. Detection and feature selection in sparse mixture models. +Annals of Statistics, 45(5):1920–1950, 2017. (Cited on page 3.) +[30] C. Villani. Topics in Optimal Transportation. American Mathematical Society, 2003. (Cited on +page 8.) +[31] J. Yang, V. Rao, and J. Neville. A stein-papangelou goodness-of-fit test for point processes. +In Proceedings of the Twenty-Second International Conference on Artificial Intelligence and +Statistics, volume 89 of Proceedings of Machine Learning Research, pages 226–235. PMLR, +16–18 Apr 2019. (Cited on page 1.) +16 + +Supplement to “Optimal Rate for Parameter Estimation in +Matrix-variate Deviated Models” +In this supplementary material, we will present the proofs for the convergence rates of densities in +Appendix A, while those for minimax lower bounds and convergence rates of parameter estimation +are left in Appendix B. 
Finally, we provide a necessary lemma for those results along with its proof +in Appendix C. +A +Proofs for Convergence Rates of Densities +In this appendix, we provide proofs for key results on the convergence rates of densities presented +in Section 3. +A.1 +Proof of Theorem 2 +The second inequality in Theorem 2 is straightforward from the equivalent form of W1(G, G∗) in +Lemma 1 (see Appendix C). Therefore, we will only focus on establishing the first inequality in that +theorem. We start with the following key result: +Proposition 2. Given the assumptions in Theorem 3 and G = (λ, µ, Σ) such that λ ∈ [0, 1] and +(µ, Σ) can be equal to (µ0, Σ0). Then, we have +lim +ǫ→0 inf +G,G∗ +�V (pG, pG∗) +K(G, G∗) : K(G, G) ∨ K(G∗, G) ≤ ǫ +� +> 0. +Proof. The high level idea of the proof of Proposition 3 is to utilize the Taylor expansion techniques +previously employed in [8, 24, 17, 14]. Indeed, following Fatou’s argument from Theorem 3.1 in [17], +to obtain the conclusion of Proposition 3 it suffices to demonstrate that +lim +ǫ→0 inf +G,G∗ +�∥pG − pG∗∥∞ +K(G, G∗) +: K(G, G) ∨ K(G∗, G) ≤ ǫ +� +> 0. +Assume that the above conclusion does not hold. +It implies that we can find two sequences +Gn = (λn, µn, Σn) and G∗,n = (λ∗ +n, µ∗ +n, Σ∗ +n) such that K(Gn, G) → 0, K(G∗,n, G) → 0, and +∥pGn − pG∗,n∥∞/K(Gn, G∗,n) → 0 as n → ∞. Now, we only consider the most challenging set- +ting of (µn, Σn) and (µ∗ +n, Σ∗ +n) when they share the same limit point (µ′, Σ′). The other settings of +these two components can be argued in the same fashion. Here, (µ′, Σ′) is not necessarily equal to +(µ0, Σ0) or (µ, Σ) as λn, λ∗ +n can go to 0 or 1 in the limit. Under that setting, by means of Taylor +expansion up to the first order we obtain +pGn(x) − pG∗,n(x) +K(Gn, G∗,n) += +(λ∗ +n − λn)[h0(x|µ0, Σ0) − f(x|µ∗ +n, Σ∗ +n)] + λn[f(x|µn, Σn) − f(x|µ∗ +n, Σ∗ +n)] +K(Gn, G∗,n) += +(λ∗ +n − λn)[h0(x|µ0, Σ0) − f(x|µ∗ +n, Σ∗ +n)] +K(Gn, G∗,n) ++ +λn +� � +|α|=1 +(µn − µ∗ +n)α1(Σn − Σ∗ +n)α2 +α! +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) + R1(x) +� +K(Gn, G∗,n) +, +17 + +where R1(x) is Taylor remainder and α = (α1, α2) in the summation of the second equality sat- +isfies α1 = (α(1) +1 , . . . , α(1) +d1 ) ∈ Nd1, α2 = (α(2) +uv ) ∈ Nd2×d2, |α| = +d1 +� +i=1 +α(1) +i ++ +� +1≤u,v≤d2 +α(2) +uv , and +α! = +d1 +� +i=1 +α(1) +i ! +� +1≤u,v≤d2 +α(2) +uv !. +As f admits the first order uniform Lipschitz condition, we have +R1(x) = O(∥(µn, Σn) − (µ∗ +n, Σ∗ +n)∥1+γ) for some γ > 0, which implies that +λn|R1(x)|/K(Gn, G∗,n) = O(∥(µn, Σn) − (µ∗ +n, Σ∗ +n)∥γ) → 0 +as n → ∞. Therefore, we can treat [pGn(x) − pG∗,n(x)]/K(Gn, G∗,n) as the linear combination of +h0(x|θ0, Σ0) and +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) when |α| ≤ 1. Assume that the coefficients of these terms +go to 0. Then, by studying the coefficients of h0(x|θ0, Σ0), ∂f +∂µi +(x|µ0, Σ0), and +∂f +∂Σuv +(x|µ0, Σ0), we +achieve +(λ∗ +n − λn)/K(Gn, G∗,n) → 0, λn(µn − µ∗ +n)i/K(Gn, G∗,n) → 0, λn(Σn − Σ∗ +n)uv/K(Gn, G∗,n) → 0 +for all 1 ≤ i ≤ d1 and 1 ≤ u, v ≤ d2 where (a)i denotes the i-th element of vector a and Auv denotes +the (u, v)-th element of matrix A. It would imply that +(λn + λ∗ +n)∥(µn, Σn) − (µ∗ +n, Σ∗ +n)∥/K(Gn, G∗,n) → 0. +Therefore, we achieve +1 = +� +|λ∗ +n − λn| + (λn + λ∗ +n)∥(µn, Σn) − (µ∗ +n, Σ∗ +n)∥ +� +/K(Gn, G∗,n) → 0, +a contradiction. Therefore, not all the coefficients of h0(x|θ0, Σ0) and +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) go to 0. 
+If we denote mn to be the maximum of the absolute values of the coefficients of h0(x|θ0, Σ0) and +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n), then we get 1/mn ̸→ ∞ as n → ∞, i.e., 1/mn is uniformly bounded. Hence, +we achieve for all x that +1 +mn +pGn(x) − pG∗,n(x) +K(Gn, G∗,n) +→ ηf(|µ0, Σ0) + +� +|α|≤1 +τα +∂|α|f +∂µα1∂Σα2 (x|µ′, Σ′) = 0 +for some coefficients η and τα such that they are not all 0. However, as f is distinguishable from h0 +up to the first order, the above equation indicates that η = τα = 0 for all |α| ≤ 1, a contradiction. +As a consequence, we achieve the conclusion of the proposition. +Now, assume that the conclusion of Theorem (2) does not hold. It implies that we can find two +sequences G′ +n and G′ +∗,n such that An = ∥pG′n − pG′∗,n∥2/K(G′ +n, G′ +∗,n) → 0 as n → ∞. Since Θ and +Ω are two bounded subsets, we can find subsequences of G′ +n and G′ +∗,n such that K(G′ +n, G1) and +K(G′ +∗,n, G2) vanish to 0 as n → ∞ where G1, G2 are some discrete measures having one component +18 + +to be (µ0, Σ0). Because An → 0, we obtain V (pG′n, pG′∗,n) → 0 as n → ∞. By means of Fatou’s +lemma, we have +0 = lim +n→∞ +� ���(pG′n(x) − pG′∗,n(x)) +��� dx ≥ +� +lim inf +n→∞ +���(pG′n(x) − pG′∗,n(x)) +��� dx = V (pG1(x), pG2(x)). +Due to the fact that f is distinguishable from h0 up to the first order, the above equation implies +that G1 ≡ G2. However, from the result of Proposition 2, regardless of the value of G1 we would +have An ̸→ 0 as n → ∞, which is a contradiction. +Therefore, we obtain the conclusion of the +theorem. +A.2 +Proof of Theorem 3 +Utilizing the same Fatou’s argument as that of Proposition 2 , to achieve the conclusion of the first +inequality in Theorem 3 it suffices to demonstrate the following result +Proposition 3. Given the assumptions in Theorem 3 and G = (λ, µ, Σ) such that λ ∈ [0, 1] and +(µ, Σ) can be identical to (µ0, Σ0). Then, the following holds +(a) If (µ0, Σ0) ̸= (µ, Σ) and λ > 0, then +lim +ǫ→0 inf +G,G∗ +�∥pG − pG∗∥∞ +K(G, G∗) +: K(G, G) ∨ K(G∗, G) ≤ ǫ +� +> 0. +(b) If (µ0, Σ0) ≡ (µ, Σ) or (µ0, Σ0) ̸= (µ, Σ) and λ = 0, then +lim +ǫ→0 inf +G,G∗ +�∥pG − pG∗∥∞ +D(G, G∗) +: D(G, G) ∨ D(G∗, G) ≤ ǫ +� +> 0. +Proof. The proof of part (a) is essentially similar to that of Proposition 2; therefore, we only +provide the proof for the challenging settings of part (b). Here, we only consider the setting that +(µ0, Σ0) ≡ (µ, Σ) as the proof for other possibilities of (µ0, Σ0) can be argued in the similar fashion. +For the transparency of our proof argument, we assume that T is an identity mapping. Under +this assumption, (µ0, Σ0) = (µ0, Σ0), G = (λ, θ0, Σ0), and h0(x|θ0, Σ0) = f(x|θ0, Σ0) for all x ∈ +X. Assume that the conclusion of Proposition 3 does not hold. It implies that we can find two +sequences Gn = (λn, µn, Σn) and G∗,n = (λ∗ +n, µ∗ +n, Σ∗ +n) such that D(Gn, G) = λn∥(∆µn, ∆Σn)∥2 → 0, +D(G∗,n, G) = λ∗ +n∥(∆µ∗ +n, ∆Σ∗ +n)∥2 → 0, and ∥pGn − pG∗,n∥∞/D(Gn, G∗,n) → 0 as n → ∞. For the +transparency of presentation, we denote An = ∥(∆µn, ∆Σn)∥, Bn = ∥(∆µ∗ +n, ∆Σ∗ +n)∥, and Cn = +∥(µn, Σn) − (µ∗ +n, Σ∗ +n)∥ = ∥(∆µn, ∆Σn) − (∆µ∗ +n, ∆Σ∗ +n)∥. Now, we have three main cases regarding +the convergence behaviors of (µn, Σn) and (µ∗ +n, Σ∗ +n) +Case 1: +Both An → 0 and Bn → 0, i.e., (µn, Σn) and (µ∗ +n, Σ∗ +n) vanish to (µ0, Σ0) as n → ∞. Due +to the symmetry between λn and λ∗ +n, we assume without loss of generality that λ∗ +n ≥ λn for infinite +values of n. Without loss of generality, we replace these subsequences of Gn, G∗,n by the whole +sequences of Gn and G∗,n. 
Now, the formulation of D(Gn, G∗,n) is +D(Gn, G∗,n) = (λ∗ +n − λn)B2 +n+ +� +λnAn + λ∗ +nBn +� +Cn. +19 + +Now, by means of Taylor expansion up to the second order, we get +pGn(x) − pG∗,n(x) +D(Gn, G∗,n) += +(λ∗ +n − λn)[f(x|µ0, Σ0) − f(x|µ∗ +n, Σ∗ +n)] + λn[f(x|µn, Σn) − f(x|µ∗ +n, Σ∗ +n)] +D(Gn, G∗,n) += +(λ∗ +n − λn) +� +2� +|α|=1 +(−∆µ∗ +n)α1(−∆Σ∗ +n)α2 +α! +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) + R1(x) +� +D(Gn, G∗,n) ++ +λn +� +2� +|α|=1 +(∆µn − ∆µ∗ +n)α1(∆Σn − ∆Σ∗ +n)α2 +α! +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) + R2(x) +� +D(Gn, G∗,n) +where R1(x) and R2(x) are Taylor remainders that satisfy R1(x) = O(B2+γ +n +) and R2(x) = O(C2+γ +n +) +for some positive number γ due to the second order uniform Lipschitz condition of kernel density +function f. From the formation of D(Gn, G∗,n), since An+Bn ≥ Cn (triangle inequality), as An → 0 +and Bn → 0 it is clear that +(λn − λ∗ +n)|R1(x)|/D(Gn, G∗,n) ≤ |R1(x)|/B2 +n = O(Bγ +n) → 0 +λn|R2(x)|/D(Gn, G∗,n) ≤ |R2(x)|/ {(An + Bn)Cn} = O +� +C2+γ +n +/C2 +n +� += O(Cγ +n) → 0 +as n → ∞ for all x ∈ X. Therefore, we achieve for all x ∈ X that +� +(λn − λ∗ +n)|R1(x)| + λn|R2(x)| +� +/D(Gn, G∗,n) → 0. +Hence, we can treat [pGn(x)−pG∗,n(x)]/D(Gn, G∗,n) as a linear combination of +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) +for all x and α = (α1, α2) such that 1 ≤ |α| ≤ 2. Assume that all the coefficients of these terms go +to 0 as n → ∞. By studying the vanishing behaviors of the coefficients of +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) as +|α| = 1, we achieve the following limits +� +λn(∆µn)i − λ∗ +n(∆µ∗ +n)i +� +/D(Gn, G∗,n) → 0, +� +λn(∆Σn)uv − λ∗ +n(∆Σ∗ +n)uv +� +/D(Gn, G∗,n) → 0 +for all 1 ≤ i ≤ d1 and 1 ≤ u, v ≤ d2 where (a)i denotes the i-th element of vector a and Auv denotes +the (u, v)-th element of matrix A. Furthermore, for any 1 ≤ i, j ≤ d (i and j can be equal), the +coefficient of +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) when (α1)i = (α1)j = 1 and α2 = 0 leads to +� +(λ∗ +n − λn)(∆µ∗ +n)i(∆µ∗ +n)j + λn(∆µn − ∆µ∗ +n)i(∆µn − ∆µ∗ +n)j +� +/D(Gn, G∗,n) → 0. +(11) +When i = j, the above limits lead to +� +(λ∗ +n − λn) {(∆µ∗ +n)i}2 + λn {(∆µn − ∆µ∗ +n)i}2 +� +/D(Gn, G∗,n) → 0. +20 + +Therefore, we would have +� +(λ∗ +n − λn)∥∆µ∗ +n∥2 + λn∥∆µn − ∆µ∗ +n∥2 +� +/D(Gn, G∗,n) → 0. +(12) +Now, as +� +λn(∆µn)i − λ∗ +n(∆µ∗ +n)i +� +/D(Gn, G∗,n) → 0 we obtain that +� +λn(∆µn)i(∆µn)j − λ∗ +n(∆µ∗ +n)i(∆µn)j +� +/D(Gn, G∗,n) +→ +0, +� +λn(∆µn)i(∆µ∗ +n)j − λ∗ +n(∆µ∗ +n)i(∆µ∗ +n)j +� +/D(Gn, G∗,n) +→ +0. +(13) +Plugging the results from (13) into (11), we ultimately achieve for any 1 ≤ i, j ≤ d that +(λ∗ +n − λn)(∆µ∗ +n)i(∆µn)j/D(Gn, G∗,n) → 0. +(14) +Using the results from (11) and (14), we would have +λn(∆µn)i(∆µn − ∆µ∗ +n)j +D(Gn, G∗,n) +→ (λ∗ +n − λn)(∆µn)i(∆µ∗ +n)j +D(Gn, G∗,n) +→ 0, +λ∗ +n(∆µ∗ +n)i(∆µn − ∆µ∗ +n)j +D(Gn, G∗,n) +→ (λ∗ +n − λn)(∆µ∗ +n)i(∆µn)j +D(Gn, G∗,n) +→ 0 +for any 1 ≤ i, j ≤ d. Therefore, it leads to +� +1≤i,j≤d +λn|(∆µn)i||(∆µn − ∆µ∗ +n)j| +D(Gn, G∗,n) += +λn +� +1≤i≤d +|(∆µn)i| � +1≤i≤d +|(∆µn − ∆µ∗ +n)i| +D(Gn, G∗,n) +→ 0, +� +1≤i,j≤d +λ∗ +n|(∆µ∗ +n)i||(∆µn − ∆µ∗ +n)j| +D(Gn, G∗,n) += +λ∗ +n +� +1≤i≤d +|(∆µn)∗ +i | � +1≤i≤d +|(∆µn − ∆µ∗ +n)i| +D(Gn, G∗,n) +→ 0. +The above results mean that +λn∥∆µn∥∥∆µn − ∆µ∗ +n∥/D(Gn, G∗,n) → 0, λ∗ +n∥∆µ∗ +n∥∥∆µn − ∆µ∗ +n∥/D(Gn, G∗,n) → 0. 
+(15) +By applying the above argument with the coefficients of +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) when α1 = 0 and +(α2)u1v1 = (α2)u2v2 = 1 for any two pairs (u1, v1), (u2, v2) (not neccessarily distinct) such that +1 ≤ u1, u2, v1, v2 ≤ d or (α1)i = 1 and (α2)uv = 1 for any 1 ≤ i ≤ d and 1 ≤ u, v ≤ d, we +respectively obtain that +� +(λ∗ +n − λn)∥∆Σ∗ +n∥2 + λn∥∆Σn − ∆Σ∗ +n∥2 +� +/D(Gn, G∗,n) → 0, +λn∥∆Σn∥∥∆Σn − ∆Σ∗ +n∥/D(Gn, G∗,n) → 0, λ∗ +n∥∆Σ∗ +n∥∥∆Σn − ∆Σ∗ +n∥/D(Gn, G∗,n) → 0, +λn∥∆µn∥∥∆Σn − ∆Σ∗ +n∥/D(Gn, G∗,n) → 0, λ∗ +n∥∆µ∗ +n∥∥∆Σn − ∆Σ∗ +n∥/D(Gn, G∗,n) → 0. +(16) +Combining the results from (12), (15), and (16) leads to +1 = D(Gn, G∗,n)/D(Gn, G∗,n) → 0, +21 + +which is a contradiction. As a consequence, not all the coefficients of +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) go to 0 +as 1 ≤ |α| ≤ 2. Follow the argument of Proposition 2, by denoting mn to be the maximum of the +absolute values of the coefficients of +∂|α|f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) we achieve for all x that +1 +mn +pGn(x) − pG∗,n(x) +W 2 +2 (Gn, G∗,n) +→ +2 +� +|α|=1 +τα +∂|α|f +∂µα1∂Σα2 (x|µ0, Σ0) = 0 +where τα ∈ R are some coefficients such that not all of them are 0. +Due to the second order +identifiability condition of f, the above equation implies that τα = 0 for all α such that |α| = 2, +which is a contradiction. As a consequence, Case 1 cannot happen. +Case 2: +Exactly one of An and Bn goes to 0, i.e., there exists at least one component among +(µn, Σn) and (µ∗ +n, Σ∗ +n) that does not converge to (µ0, Σ0) as n → ∞. Due to the symmetry of An +and Bn, we assume without loss of generality that An ̸→ 0 and Bn → 0, which is equivalent to +(µn, Σn) → (µ′, Σ′) ̸= (µ0, Σ0) while (µ∗ +n, Σ∗ +n) → (µ0, Σ0) as n → ∞. We denote +D′(Gn, G∗,n) = |λ∗ +n − λn|Bn + λnAn + λ∗ +nBn. +Since [pGn(x) − pG∗,n(x)]/D(Gn, G∗,n) → 0, we achieve that [pGn(x) − pG∗,n(x)]/D′(Gn, G∗,n) +→ 0 for all x as D(Gn, G∗,n) ≲ D′(Gn, G∗,n). By means of Taylor expansion up to the first order, +we have +pGn(x) − pG∗,n(x) +D′(Gn, G∗,n) += +(λ∗ +n − λn)[f(x|µ0, Σ0) − f(x|µ∗ +n, Σ∗ +n)] + λnf(x|µn, Σn) − λnf(x|µ∗ +n, Σ∗ +n) +D′(Gn, G∗,n) += +(λ∗ +n − λn) +� � +|α|=1 +(−∆µ∗ +n)α1(−∆Σ∗ +n)α2 +α! +∂f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) + R′ +1(x) +� +D′(Gn, G∗,n) ++ +λnf(x|µn, Σn) − λnf(x|µ∗ +n, Σ∗ +n) +D′(Gn, G∗,n) +where R′ +1(x) is Taylor remainder that satisfies (λ∗ +n − λn)|R′ +1(x)|/D′(Gn, G∗,n) = O(Bγ′ +n ) → 0 for +some positive number γ′ > 0. +Since (µn, Σn) and (µ∗ +n, Σ∗ +n) do not have the same limit, they +will be different when n is large enough, i.e., n ≥ M′ for some value of M′. Now, as n ≥ M′, +[pGn(x)−pG∗,n(x)]/D′(Gn, G∗,n) becomes a linear combination of +∂f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) for all |α| ≤ 1 +and f(x|µn, Σn). If all of the coefficients of these terms go to 0, we would have λn/D′(Gn, G∗,n) → 0, +(λ∗ +n − λn)(−∆µ∗ +n)i/D′(Gn, G∗,n) → 0, and (λ∗ +n − λn)(−∆Σ∗ +n)uv/D′(Gn, G∗,n) → 0 for all 1 ≤ i ≤ d1 +and 1 ≤ u, v ≤ d2. It would imply that (λ∗ +n − λn)Bn/D′(Gn, G∗,n) → 0, λnAn/D′(Gn, G∗,n) → 0, +and λnBn/D′(Gn, G∗,n) → 0. These results lead to +1 = +� +|λ∗ +n − λn|Bn + λnAn + λ∗ +nBn +� +/D′(Gn, G∗,n) → 0, +22 + +a contradiction. Therefore, not all the coefficients of +∂f +∂µα1∂Σα2 (x|µ∗ +n, Σ∗ +n) and f(x|µn, Σn) go to 0. 
+By defining m′ +n to be the maximum of these coefficients, we achieve for all x that +1 +m′n +pGn(x) − pG∗,n(x) +D′(Gn, G∗,n) +→ η′f(x|µ0, Σ0) + +1 +� +|α|=0 +τ ′ +α +∂|α|f +∂µα1∂Σα2 (x|µ′, Σ′) = 0, +where η′ and τ ′ +α are coefficients such that not all of them are 0, which is a contradiction to the first +order identifiability of f. As a consequence, Case 2 cannot hold. +Case 3: +Both An and Bn do not go to 0, i.e., (µn, Σn) and (µ∗ +n, Σ∗ +n) do not converge to (µ0, Σ0) +as n → ∞. +Since Dn(Gn, G∗,n) ≲ K(Gn, G∗,n) = |λn − λ∗ +n| + (λn + λ∗ +n)Cn and [pGn(x) − +pG∗,n(x)]/D(Gn, G∗,n) → 0, we achieve that [pGn(x) − pG∗,n(x)]/K(Gn, G∗,n) → 0 for all x. From +here, by using the same argument as that of the proof of Proposition 2, we also reach the contra- +diction. Therefore, Case 3 cannot happen. +In sum, we achieve the conclusion of the proposition. +A.3 +Proof of Theorem 4 +For the simplicity of proof argument, we will only consider the univariate setting of Gaussian kernel, +i.e., when both µ and Σ = σ2 are scalars. The argument for the multivariate setting of Gaussian +kernel can be argued in the rather similar fashion, which is omitted. Throughout this proof, we +denote v := σ2. Now, according to the proof argument of Proposition 2 and Proposition 3, to +achieve the conclusion of the theorem it suffices to demonstrate the following result: +Proposition 4. Given G = (λ, µ, v) such that λ ∈ [0, 1] and (µ, v) can be identical to (µ0, v0). +Then, the following holds +(a) If (µ0, v0) ̸= (µ, v) and λ > 0, then +lim +ǫ→0 inf +G,G∗ +�∥pG − pG∗∥∞ +K(G, G∗) +: K(G, G) ∨ K(G∗, G) ≤ ǫ +� +> 0. +(b) If (µ0, v0) ≡ (µ, v) or (µ0, v0) ̸= (µ, v) and λ = 0, then +lim +ǫ→0 inf +G,G∗ +�∥pG − pG∗∥∞ +Q(G, G∗) +: Q(G, G) ∨ Q(G∗, G) ≤ ǫ +� +> 0. +Proof. We will only provide the proof for part (b) since the proofs for part (a) can be argued in +similar fashion as that of Proposition 2. For the transparency of our proof argument, we also assume +that (µ0, v0) ≡ (µ, v) and T is an identity mapping, i.e., (µ0, v0) = (µ0, v0), G = (λ, θ0, v0), and +h0(x|θ0, Σ0) = f(x|θ0, Σ0) for all x ∈ X. Assume that the conclusion of Proposition 4 does not +hold. It implies that we can find two sequences Gn = (λn, µn, vn) and G∗,n = (λ∗ +n, µ∗ +n, v∗ +n) such that +Q(Gn, G) → 0, Q(G∗,n, G) → 0, and ∥pGn − pG∗,n∥∞/Q(Gn, G∗,n) → 0 as n → ∞. Due to the +symmetry between λn and λ∗ +n, we can assume without loss of generality that λ∗ +n ≥ λn. Therefore, +23 + +we achieve that +Q(Gn, G∗,n) = (λ∗ +n − λn)(|∆µ∗ +n|4 + |∆v∗ +n|2)+ +� +λn(|∆µn|2 + |∆vn|) + λ∗ +n(|∆µ∗ +n|2 + |∆v∗ +n|) +� +× +× +� +|µn − µ∗ +n|2 + |vn − v∗ +n| +� +. +In this proof, we only consider the scenario when ∥(∆µn, ∆vn)∥ → 0 and ∥(∆µ∗ +n, ∆v∗ +n)∥ → 0 since +the arguments for other settings of these two terms are similar to those of Case 2 and Case 3 in +the proof of Proposition 3. As being indicated in Section 3.2.2, the univariate Gaussian kernel +contains the partial differential equation structure ∂2f +∂µ2 (x|µ, v) = 2∂f +∂v (x|µ, v) for all µ ∈ Θ and +v ∈ Ω. Therefore, for any α = (α1, α2) we can check that +∂|α|f +∂µα1∂vα2 (x|µ, v) = +1 +2α2 +∂βf +∂µβ (x|µ, v) +where β = α1 + 2α2. Now, by means of Taylor expansion up to the fourth order, we obtain +pGn(x) − pG∗,n(x) +Q(Gn, G∗,n) += +(λ∗ +n − λn) +� +4� +|α|=1 +(−∆µ∗ +n)α1(−∆v∗ +n)α2 +α1!α2! +∂|α|f +∂µα1∂vα2 (x|µ∗ +n, v∗ +n) + R1(x) +� +Q(Gn, G∗,n) ++ +λn +� +4� +|α|=1 +(∆µn − ∆µ∗ +n)α1(∆vn − ∆v∗ +n)α2 +α1!α2! 
+∂|α|f +∂µα1∂vα2 (x|µ∗ +n, v∗ +n) + R2(x) +� +Q(Gn, G∗,n) += +8 +� +β=1 +� +α1,α2 +(λ∗ +n − λn)(−∆µ∗ +n)α1(−∆v∗ +n)α2 + λn(∆µn − ∆µ∗ +n)α1(∆vn − ∆v∗ +n)α2 +2α2α1!α2!Q(Gn, G∗,n) +× +∂βf +∂µβ (x|µ∗ +n, v∗ +n) + (λ∗ +n − λn)R1(x) + λnR2(x) +Q(Gn, G∗,n) +where R1(x), R2(x) are Taylor remainders and the range of α1, α2 in the summation of the second +equality satisfies β = α1 +2α2. As Gaussian kernel admits fourth-order uniform Lipschitz condition, +it is clear that +(λ∗ +n − λn)|R1(x)| + λn|R2(x)| +Q(Gn, G∗,n) += O(∥(∆µ∗ +n, ∆v∗ +n)∥γ + ∥(µn, vn) − (µ∗ +n, v∗ +n)∥γ) → 0 +as n → ∞ for some γ > 0. Therefore, we can consider [pGn(x) − pG∗,n(x)]/Q(Gn, G∗,n) as a linear +combination of ∂βf +∂µβ (x|µ∗ +n, v∗ +n) for 1 ≤ β ≤ 8. If all of the coefficients of these terms go to 0, then +we obtain +Lβ = +� +α1,α2 +(λ∗ +n − λn)(−∆µ∗ +n)α1(−∆v∗ +n)α2 + λn(∆µn − ∆µ∗ +n)α1(∆vn − ∆v∗ +n)α2 +2|α2|α1!α2! +Q(Gn, G∗,n) +→ 0 +for any 1 ≤ β ≤ 8. Now, we divide our argument with Lβ into two key cases +24 + +Case 1: +� +λn(|∆µn|2 + |∆vn|) + λ∗ +n|(∆µ∗ +n|2 + |∆v∗ +n|) +� +/ +� +λn(|µn − µ∗ +n|2 + |vn − v∗ +n|) +� +̸→ ∞. It +implies that as n is large enough, we would have +Q(Gn, G∗,n) ≲ (λ∗ +n − λn)(|∆µ∗ +n|4 + |∆v∗ +n|2) + λn(|∆µn − ∆µ∗ +n|4 + |∆vn − ∆v∗ +n|2). +Combining the above result with Lβ → 0 for all 1 ≤ β ≤ 8, we get +Hβ = +� +α1,α2 +(λ∗ +n − λn)(−∆µ∗ +n)α1(−∆v∗ +n)α2 + λn(∆µn − ∆µ∗ +n)α1(∆vn − ∆v∗ +n)α2 +2|α2|α1!α2! +(λ∗n − λn)(|∆µ∗n|4 + |∆v∗n|2) + λn(|∆µn − ∆µ∗n|4 + |∆vn − ∆v∗n|2) +→ 0 +Note that, when the denominator of the above limits is (λ∗ +n − λn)(|∆µ∗ +n|4 + |∆v∗ +n|4) + λn(|∆µn − +∆µ∗ +n|4 +|∆vn −∆v∗ +n|4), the technique for studying the above system of limits with this denominator +has been considered in Proposition 2.3 in [16]. +However, since the current denominator of Hβ +strongly dominates by the previous denominator, we must develop a more sophisticated control of +Hβ as 1 ≤ β ≤ 8 to obtain a concrete understanding of their limits. Due to the symmetry between +λ∗ +n−λn and λn, we assume without loss of generality that λ∗ +n−λn ≤ λn for all n (by the subsequence +argument). We have two possibilities regarding λn and λ∗ +n +Case 1.1: +(λ∗ +n − λn)/λn ̸→ 0 as n → ∞. Under that setting, we define pn = max {λ∗ +n − λn, λn} +and +Mn = max +� +|∆µ∗ +n|, |∆µ∗ +n − ∆µn|, |∆v∗ +n|1/2, |∆v∗ +n − ∆vn|1/2� +Additionally, we let (λ∗ +n − λn)/pn → c2 +1, λn/pn → c2 +2, ∆µ∗ +n/Mn → −a1, (∆µ∗ +n − ∆µn)/Mn → a2, +∆v∗ +n/M2 +n → −2b1, and (∆vn − ∆v∗ +n)/M2 +n → 2b2. From here, at least one among a1, a2, b1, b2 and +both c1, c2 are different from 0. Now, by dividing both the numerators and the denominators of Hβ +as 1 ≤ β ≤ 4 by pnMβ +n , we achieve the following system of polynomial equations +c2 +1a1 + c2 +2a2 = 0 +1 +2(c2 +1a2 +1 + c2 +2a2 +2) + c2 +1b1 + c2 +2b2 = 0 +1 +3!(c2 +1a3 +1 + c2 +2a3 +2) + c2 +1a1b1 + c2 +2a2b2 = 0 +1 +4!(c2 +1a4 +1 + c2 +2a4 +2) + 1 +2!(c2 +1a2 +1b1 + c2 +2a2 +2b2) + 1 +2!(c2 +1b2 +1 + c2 +2b2 +2) = 0, +As being indicated in Proposition 2.1 in [16], this system will only admits the trivial solution, i.e., +a1 = a2 = b1 = b2 = 0, which is a contradiction. Therefore, Case 1.1 cannot happen. +Case 1.2: +(λ∗ +n − λn)/λn → 0, i.e., λ∗ +n/λn → 1, as n → ∞. +Under that setting, if Mn ∈ +max +� +|∆µn − ∆µ∗ +n|, |∆vn − ∆v∗ +n|1/2� +, then we have +λnM4 +n = max +� +(λ∗ +n − λn)|∆µ∗ +n|4, (λ∗ +n − λn)|∆µn − ∆µ∗ +n|4, λn|∆v∗ +n|2, λn|∆vn − ∆v∗ +n|2 +� +. 
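As a brief aside from the case analysis (it is not used in the argument), the heat-equation identity ∂²f/∂µ²(x|µ, v) = 2 ∂f/∂v(x|µ, v) that was invoked at the start of this proof to reduce all mixed derivatives to pure derivatives in µ can be verified symbolically. The sketch below is purely illustrative and assumes the sympy package is available:

import sympy as sp

x, mu = sp.symbols('x mu', real=True)
v = sp.symbols('v', positive=True)

# univariate Gaussian kernel f(x | mu, v) with mean mu and variance v
f = sp.exp(-(x - mu) ** 2 / (2 * v)) / sp.sqrt(2 * sp.pi * v)

lhs = sp.diff(f, mu, 2)        # second derivative in the location parameter
rhs = 2 * sp.diff(f, v)        # twice the first derivative in the variance parameter
print(sp.simplify(lhs - rhs))  # prints 0, confirming the heat-equation structure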
+25 + +By dividing both the numerator and the denominator of H1 by λnMn, given that the new denomi- +nator of H1 goes to 0, its new numerator also goes to 0, i.e., we obtain +(λ∗ +n − λn)(−∆µ∗ +n)/ {λnMn} + (∆µn − ∆µ∗ +n)/Mn → 0. +Since (λ∗ +n − λn)/λn → 0 and |∆µ∗ +n| ≤ Mn, we have (λ∗ +n − λn)(−∆µ∗ +n)/ {λnMn} → 0. Therefore, we +have (∆µn − ∆µ∗ +n)/Mn → 0. With the previous results, by dividing both the numerator and the +denominator of H2 by λnM2 +n and given that the new denominator goes to 0, we have +(λ∗ +n − λn)(−∆v∗ +n)/ +� +λnM2 +n +� ++ (∆vn − ∆v∗ +n)/M2 +n → 0. +As (λ∗ +n − λn)(−∆v∗ +n)/ +� +λnM2 +n +� +→ 0 (due to the assumption of Mn), we get (∆vn − ∆v∗ +n)/M2 +n → 0. +These results imply that +1 = max +� +|∆µn − ∆µ∗ +n|2, |∆vn − ∆v∗ +n| +� +M2n +→ 0, +which is a contradiction. Therefore, we would only have Mn ∈ max +� +|∆µ∗ +n|, |∆v∗ +n|1/2� +. For the +simplicity of the proof, we only consider the setting when Mn = |∆µ∗ +n| for all n (by subsequence +argument). The setting that Mn = |∆v∗ +n|1/2 for all n can be argued in the similar fashion. Now, if +we have +max +� +|∆µn − ∆µ∗ +n|, |∆vn − ∆v∗ +n|1/2� +/Mn ̸→ 0, +then by dividing the numerator and denominator of Hi with λn +� +max +� +|∆µn − ∆µ∗ +n|, |∆vn − ∆v∗ +n|1/2��i +as 1 ≤ i ≤ 2, we would achieve +1 = max +� +|∆µn − ∆µ∗ +n|2, |∆vn − ∆v∗ +n| +� +max {|∆µn − ∆µ∗n|2, |∆vn − ∆v∗n|} → 0, +a contradiction. Therefore, we must have +max +� +|∆µn − ∆µ∗ +n|, |∆vn − ∆v∗ +n|1/2� +/Mn ̸→ 0 +(17) +as n → ∞. Now, we further divide the argument under that setting of Mn into two small cases +Case 1.2.1: +(λ∗ +n − λn)|∆µ∗ +n|4 ≤ λn|∆µn − ∆µ∗ +n|4 for all n (by subsequence argument). Since +Mn = |∆µ∗ +n|, we would have (λ∗ +n − λn)|∆µ∗ +n|i ≤ λn|∆µn − ∆µ∗ +n|i for all n and 1 ≤ l ≤ 4. From here, +we obtain that +(λ∗ +n − λn)|∆µ∗ +n|4 +λn|∆µn − ∆µ∗n| ≤ λn|∆µn − ∆µ∗ +n|4 +λn|∆µn − ∆µ∗n| → 0, +(λ∗ +n − λn)|∆v∗ +n|2 +λn|∆µn − ∆µ∗n| ≤ (λ∗ +n − λn)|∆µ∗ +n|4 +λn|∆µn − ∆µ∗n| → 0. +If |∆µn − ∆µ∗ +n|/|∆vn − ∆v∗ +n|1/2 ̸→ 0, by diving both the numerator and the denominator of H1 by +λn|∆µn − ∆µ∗ +n| and given that the new denominator goes to 0, the new numerator must converge +to 0, i.e. we have +(λ∗ +n − λn)∆µ∗ +n/ {λn(∆µn − ∆µ∗ +n)} → −1. +26 + +However, since we have |(∆µn − ∆µ∗ +n)/∆µ∗ +n → 0, the above result would imply that +(λ∗ +n − λn)|∆µ∗ +n|4/ +� +λn|∆µn − ∆µ∗ +n|4� +→ ∞, +which is a contradiction to the assumption of Case 1.2.1.1. As a consequence, we must have |∆µn − +∆µ∗ +n|/|∆vn − ∆v∗ +n|1/2 → 0. Now, we also have that +(λ∗ +n − λn)|∆µ∗ +n|4 +λn|∆vn − ∆v∗n|i/2 ≲ +λn|∆vn − ∆v∗ +n|2 +λn|∆µn − ∆µ∗n|i/2 → 0, +(λ∗ +n − λn)|∆v∗ +n|4 +λn|∆vn − ∆v∗n|i/2 ≤ (λ∗ +n − λn)|∆µ∗ +n|4 +λn|∆vn − ∆v∗n|i/2 → 0. +for all 1 ≤ i ≤ 3. Without loss of generality, we assume that ∆vn − ∆v∗ +n > 0 for all n. We denote +(−∆µ∗ +n) = qn +1 (∆vn − ∆v∗ +n) and ∆v∗ +n = qn +2 (∆vn − ∆v∗ +n) for all n. From the result of (17), we would +have |qn +1 | → ∞. Given the above results, by dividing the numerators and the denominators of Hβ +by λn(∆vn − ∆v∗ +n)β/2 for any 1 ≤ β ≤ 3, we would have the new denominators go to 0. Therefore, +all the new numerators of these Hβ also go to 0, i.e. we achieve the following system of limits +λ∗ +n − λn +λn +qn +1 → 0, λ∗ +n − λn +λn +� +(qn +1 )2 + qn +2 +� ++ 1 → 0, λ∗ +n − λn +λn +�(qn +1 )3 +6 ++ qn +1 qn +2 +2 +� +→ 0. +Since |qn +1 | → ∞, the last limit in the above system implies that (λ∗ +n − λn) +�(qn +1 )2 +3 ++ qn +2 +� +/λn → 0. 
+Combining this result with the second limit in the above system yields that (λ∗ +n − λn)(qn +1 )2/λn + +3/2 → 0, which cannot happen. Therefore, Case 1.2.1 does not hold. +Case 1.2.2: +(λ∗ +n − λn)|∆µ∗ +n|4 > λn|∆µn − ∆µ∗ +n|4 for all n (by subsequence argument). If (λ∗ +n − +λn)|∆µ∗ +n|4 ≤ λn|∆vn − ∆v∗ +n|2 for all n, the by using the same argument as that of Case 1.2.1, we +quickly achieve the contradiction. Therefore, we must have (λ∗ +n − λn)|∆µ∗ +n|4 > λn|∆vn − ∆v∗ +n|2. +Denote (∆µn − ∆µ∗ +n) = mn +1(−∆µ∗ +n), (−∆v∗ +n) = mn +2(∆µ∗ +n)2, and (∆vn − ∆v∗ +n) = mn +3(∆µ∗ +n)2. Since +Mn = |∆µ∗ +n|, we would have |mn +i | ≤ 1 for all 1 ≤ i ≤ 3. Denote mn +i → mi for all 1 ≤ i ≤ 3 +(by subsequence argument). The results of (17) lead to m1 = m3 = 0. Now by dividing both the +numerator and denominator of Hβ by (λ∗ +n−λn)(−∆µ∗ +n)β for any 1 ≤ β ≤ 4, as the new denominators +of H|β| do not go to ∞, we would also achieve that the new numerators of H|β| go to 0, i.e. the +following system of limits hold +1 + λ∗ +n − λn +λn +mn +1 → 0, +� +1 + λ∗ +n − λn +λn +(mn +1)2 +� ++ mn +2 + λ∗ +n − λn +λn +mn +3 → 0, +� +1 + λ∗ +n − λn +λn +(mn +1)3 +� +/6+ +� +mn +2 + λ∗ +n − λn +λn +mn +1mn +3 +� +/2 → 0, +� +1 + λ∗ +n − λn +λn +(mn +1)4 +� +/24+ +� +mn +2 + λ∗ +n − λn +λn +(mn +3)2 +� +/4+ +� +(mn +2)2 + λ∗ +n − λn +λn +(mn +3)2 +� +/8 → 0. +Combining with mn +1 → 0, the first and third limit of the above system of limits imply that m2 = +−1/3. +From here, the second and fourth limit yields that 1/6 + m2 + m2 +2/2 = 0, which is a +contradiction. Therefore, Case 1.2.2 cannot hold. +27 + +Case 2: +� +λn(|∆µn|2 + |∆vn|) + λ∗ +n(|∆µ∗ +n|2 + |∆v∗ +n|) +� +/ +� +λn(|µn − µ∗ +n|2 + |vn − v∗ +n|) +� +→ ∞. We +define +Q(Gn, G∗,n) += +(λ∗ +n − λn)(|∆µn|2 + |∆vn|)(|∆µ∗ +n|2 + |∆v∗ +n|)+ +� +λn(|∆µn|2 + |∆vn|) ++ +λ∗ +n(|∆µ∗ +n|2 + |∆v∗ +n|) +�� +|µn − µ∗ +n|2 + |vn − v∗ +n| +� +. +We will demonstrate that Q(Gn, G∗,n) ≍ Q(Gn, G∗,n). +In fact, from the above formulation of +Q(Gn, G∗,n), we would have that +Q(Gn, G∗,n) +≤ +2(λ∗ +n − λn)(|∆µ∗ +n|2 + |∆µn − ∆µ∗ +n|2 + |∆v∗ +n| + |∆vn − ∆v∗ +n|)(|∆µ∗ +n|2 + |∆v∗ +n|) ++ +2 +� +λn(|∆µn|2 + |∆vn|) + λ∗ +n(|∆µ∗ +n|2 + |∆v∗ +n|) +�� +|µn − µ∗ +n|2 + |vn − v∗ +n| +� +≤ +2Q(Gn, G∗,n) +where the first inequality is due to the triangle inequality and basic inequality (a + b)2 ≤ 2(a2 + b2) +and the second inequality is due to the following result +(λ∗ +n − λn)(|∆µn − ∆µ∗ +n|2 + |∆vn − ∆v∗ +n|) ≤ λ∗ +n +� +|µn − µ∗ +n|2 + |vn − v∗ +n| +� +On the other hand, we also have that +2Q(Gn, G∗,n) +≥ +(λ∗ +n − λn)(|∆µ∗ +n|2 + |∆v∗ +n|)(|∆µn|2 + |∆vn| + |µn − µ∗ +n|2 + |vn − v∗ +n|) ++ +� +λn(|∆µn|2 + |∆vn|) + λ∗ +n(|∆µ∗ +n|2 + |∆v∗ +n|) +�� +|µn − µ∗ +n|2 + |vn − v∗ +n| +� +≥ +Q(Gn, G∗,n)/2 +where the last inequality is due to triangle inequality and basic inequality (a + b)2 ≤ 2(a2 + b2). +Therefore, we conclude that Q(Gn, G∗,n) ≍ Q(Gn, G∗,n). Now, since Hβ → 0 for all 1 ≤ β ≤ 8, we +would have that +Fβ = +� +α1,α2 +(λ∗ +n − λn)(−∆µ∗ +n)α1(−∆v∗ +n)α2 + λn(∆µn − ∆µ∗ +n)α1(∆vn − ∆v∗ +n)α2 +2|α2|α1!α2! +Q(Gn, G∗,n) +→ 0. +Similar to Case 1, under Case 2 we also consider two distincts setting of λ∗ +n/λn +Case 2.1: +λ∗ +n/λn ̸→ ∞. Under this case, we denote +M′ +n := max +� +|∆µn|2, |∆vn|, |∆µ∗ +n|2, |∆v∗ +n| +� +. +From the assumption of Case 2, we would have +|∆µn − ∆µ∗ +n|2/M′ +n → 0, |∆vn − ∆v∗ +n|/M′ +n → 0. +(18) +Due to the symmetry between (|∆µn|2, |∆vn|) and (|∆µ∗ +n|2, |∆v∗ +n|), we assume without loss of gen- +erality that M′ +n ∈ max +� +|∆µn|2, |∆vn| +� +. 
Under that assumption, we have two distinct cases +28 + +Case 2.1.1: +M′ +n = |∆µn|2 for all n (by the subsequence argument). From (18), we have |∆µn − +∆µ∗ +n|/|∆µn| → 0, i.e., ∆µn/∆µ∗ +n → 1. To be able to utilize the assumptions of Case 2, we will need +to study the formulations of Fβ more deeply. In fact, when β = 1 simple calculation yields +A1 := (λn∆µn − λ∗ +n∆µ∗ +n)/Q(Gn, G∗,n) → 0. +When β = 2, we have +F2 = (λ∗ +n − λn)(∆µ∗ +n)2 + λn(∆µn − ∆µ∗ +n)2 + (λ∗ +n − λn)(−∆v∗ +n) + λn(∆vn − ∆v∗ +n) +Q(Gn, G∗,n) +→ 0. +Combining with the result of A1, it is clear that +(λ∗ +n − λn)(∆µ∗ +n)2 + λn(∆µn − ∆µ∗ +n)2 +Q(Gn, G∗,n) +→ (λ∗ +n − λn)∆µn∆µ∗ +n +Q(Gn, G∗,n) +. +Combining the above result with F2 → 0, we would have +A2 := (λ∗ +n − λn)∆µn∆µ∗ +n + λn∆vn − λ∗ +n∆v∗ +n +Q(Gn, G∗,n) +→ 0. +Now, we have two small cases +Case 2.1.1.1: +∆vn/(∆µn)2 → 0 as n → ∞. From (18), since we have |∆vn − ∆v∗ +n|/(∆µn)2 → 0, +it implies that ∆v∗ +n/(∆µn)2 → 0. Since ∆µn/∆µ∗ +n → 1, we also have that ∆v∗ +n/(∆µ∗ +n)2 → 0. Now, +from the formulations of Q(Gn, G∗,n) we have +(λ∗ +n − λn)|∆vn∆v∗ +n|/Q(Gn, G∗,n) ≤ |∆vn|/|∆µn|2 → 0, +(λ∗ +n − λn)|∆vn(∆µ∗ +n)2|/Q(Gn, G∗,n) ≤ |∆vn|/|∆µn|2 → 0, +(λ∗ +n − λn)|∆v∗ +n(∆µn)2|/Q(Gn, G∗,n) ≤ |∆v∗ +n|/|∆µ∗ +n|2 → 0. +(19) +From the result that A2 → 0, by multiplying A2 with ∆µn∆µ∗ +n, we would also have that +(λ∗ +n − λn)(∆µn∆µ∗ +n)2 + (λn∆vn − λ∗ +n∆v∗ +n)∆µn∆µ∗ +n +Q(Gn, G∗,n) +→ 0. +(20) +As λ∗ +n/λn ̸→ ∞, we have two distinct settings of λ∗ +n/λn +Case 2.1.1.1.1: +λ∗ +n/λn ̸→ 1. Using the result from (19) and the fact that ∆µn/∆µ∗ +n → 1, we +would obtain that +(λn∆vn − λ∗ +n∆v∗ +n)∆µn∆µ∗ +n/Q(Gn, G∗,n) → 0. +Combining the above result with (20), it leads to +(λ∗ +n − λn)(∆µn∆µ∗ +n)2/Q(Gn, G∗,n) → 0. +(21) +Combining (19) and (21), we would achieve that +(λ∗ +n − λn)(|∆µn|2 + |∆vn|)(|∆µ∗ +n|2 + |∆v∗ +n|) +Q(Gn, G∗,n) +→ 0. +29 + +From the formulation of Q(Gn, G∗,n), the above limit implies that +E := +� +λn(|∆µn|2 + |∆vn|) + λ∗ +n|(∆µ∗ +n|2 + |∆v∗ +n|) +�� +|µn − µ∗ +n|2 + |vn − v∗ +n| +� +Q(Gn, G∗,n) +→ 1. +Due to the previous assumptions, we obtain that +E ≲ max +� +λn(∆µn)2(∆µn − ∆µ∗ +n)2, λn(∆µn)2(∆vn − ∆v∗ +n) +� +Q(Gn, G∗,n) +. +By combining the results from (19) and (21), we can verify that +λn(∆µn)2(∆µn − ∆µ∗ +n)2 +Q(Gn, G∗,n) +→ +(λ∗ +n − λn) +� +− (∆µn)2(∆µ∗ +n)2 + (∆µn)3∆µ∗ +n +� +Q(Gn, G∗,n) +→ 0, +λn(∆µn)2(∆vn − ∆v∗ +n) +Q(Gn, G∗,n) +→ (λ∗ +n − λn)(∆µn)2∆v∗ +n +Q(Gn, G∗,n) +→ 0. +(22) +Therefore, we achieve E → 0, which is a contradiction. As a consequence, Case 2.1.1.1.1 cannot +happen. +Case 2.1.1.1.2: +λ∗ +n/λn → 1. Under this case, if we have +max +�λn|∆µn − ∆µ∗ +n|2 +(λ∗n − λn)|∆µn|2 , λn|∆vn − ∆v∗ +n| +(λ∗n − λn)|∆µn|2 +� +→ ∞, +then we will achieve that +(λ∗ +n − λn)(∆µn∆µ∗ +n)2/Q(Gn, G∗,n) ≤ min +� (λ∗ +n − λn)|∆µ∗ +n|2 +λn|∆µn − ∆µ∗n|2 , (λ∗ +n − λn)|∆µ∗ +n|2 +λn|∆vn − ∆v∗n| +� +→ 0. +From here, by using the same argument as that of Case 2.1.1.1.1, we will obtain E → 0, which is a +contradiction. Therefore, we would have that +max +�λn|∆µn − ∆µ∗ +n|2 +(λ∗n − λn)|∆µn|2 , λn|∆vn − ∆v∗ +n| +(λ∗n − λn)|∆µn|2 +� +̸→ ∞. +(23) +With that assumption, it leads to Q(Gn, G∗,n) ≍ (λ∗ +n − λn)(∆µ∗ +n)2(∆µn)2 ≍ (λ∗ +n − λn)(∆µ∗ +n)4 as +∆µ∗ +n/∆µn → 1. Now, we denote ∆µn − ∆µ∗ +n = τ n +1 ∆µ∗ +n and ∆vn − ∆v∗ +n = τ n +2 (∆µ∗ +n)2. From the +assumption of Case 2.1.1.1, we would have that τ n +1 → 0 and τ n +2 → 0. 
By dividing both the numerator +and the denominator of F3 by (λ∗ +n − λn)(∆µ∗ +n)3, as the new denominators of F3 goes to 0, we also +obtain the numerator of this term goes to 0, i.e., the following holds +� +−1 + +λn +λ∗n − λn +(τ n +1 )3 +� +/6 + +λn +2(λ∗n − λn)τ n +1 τ n +2 → 0. +From (23), we have that λn(τ n +1 )2/(λ∗ +n − λn) ̸→ ∞ and λnτ n +2 /(λ∗ +n − λn) ̸→ ∞. Therefore, since +τ n +1 → 0 and τ n +2 → 0, we would achieve that λn(τ n +1 )3/(λ∗ +n − λn) → 0 and λnτ n +1 τ n +2 /(λ∗ +n − λn) → 0. By +plugging these results to the above limit, it implies that −1/6 = 0, which is a contradiction. As a +consequence, Case 2.1.1.1.2 cannot hold. +30 + +Case 2.1.1.2: +∆vn/(∆µn)2 ̸→ 0 as n → ∞. Under that case, we will only consider the setting +that λ∗ +n/λn → 1 as the argument for other settings of that ratio can be argued in the similar +fashion. Since we have |∆vn − ∆v∗ +n|/(∆µn)2 → 0, it leads to ∆v∗ +n/(∆µn)2 ̸→ 0. Combining with +∆µn/∆µ∗ +n → 1, it implies that as n is large enough we would have +max +� +(∆µn)2, (∆µ∗ +n)2� +≲ min {|∆vn|, |∆v∗ +n|} . +(24) +According the formulation of Q(Gn, G∗,n), we achieve +(λ∗ +n − λn)|∆vn∆v∗ +n| +Q(Gn, G∗,n) +≤ min +�(λ∗ +n − λn)|∆v∗ +n| +λn|∆vn − ∆v∗n|, (λ∗ +n − λn)|∆vn| +λ∗n|∆vn − ∆v∗n|, (λ∗ +n − λn)|∆v∗ +n| +λn|∆µn − ∆µ∗n|2 , +(λ∗ +n − λn)|∆vn| +λ∗n|∆µn − ∆µ∗n|2 +� += B. +If we have B → 0, we would get (λ∗ +n − λn)|∆vn∆v∗ +n| +Q(Gn, G∗,n) +→ 0. Combining with (24), we can check +that all the results in (19), (21), and (22) hold. With similar argument as Case 2.1.1.1, we achieve +Q(Gn, G∗,n)/Q(Gn, G∗,n) → 0, a contradiction. Therefore, we must have B ̸→ 0. It implies that as +n is large enough we must have +max {λn, λ∗ +n} max +� +|∆µn − ∆µ∗ +n|2, |∆vn − ∆v∗ +n| +� +≲ (λ∗ +n − λn) min {|∆vn|, |∆v∗ +n|} . +Furthermore, as (λ∗ +n −λn)/λn → 0, we obtain |∆v∗ +n|/|∆vn −∆v∗ +n| → ∞ and |∆v∗ +n|/|∆µn −∆µ∗ +n|2 → +∞, i.e., ∆vn/∆v∗ +n → 1. With all of these results, we can check that Q(Gn, G∗,n) ≲ (λ∗ +n − λn)|∆v∗ +n|2. +Denote (∆vn −∆v∗ +n) = kn +1 |∆v∗ +n|, (∆µn −∆µ∗ +n) = kn +2 |∆v∗ +n|1/2, and ∆µ∗ +n = kn +3 |∆v∗ +n|1/2 for all n. From +all the assumptions we have thus far, we get kn +1 → 0, kn +2 → 0, and |kn +3 | ̸→ ∞. Additionally, as +B ̸→ 0, we further have λn|kn +1 |/(λ∗ +n − λn) ̸→ ∞ and λn(kn +2 )2/(λ∗ +n − λn) ̸→ ∞. By dividing both the +numerator and the denominator of F3 and F4 respectively by (λ∗ +n−λn)|∆v∗ +n|3/2 and (λ∗ +n−λn)|∆v∗ +n|2, +as the new denominators of F3, F4 do not go to infinity, we obtain the new numerators of these terms +go to 0, i.e., the following holds +� +− (kn +3 )3 + +λn +λ∗n − λn +(kn +2 )3 +� +/6+ +� +kn +3 + +λn +λ∗n − λn +kn +1 kn +2 +� +/2 → 0, +� +(kn +3 )4 + +λn +λ∗n − λn +(kn +2 )4 +� +/24+ +� +− (kn +3 )2 + +λn +λ∗n − λn +kn +1 (kn +2 )2 +� +/4+ +� +1 + +λn +λ∗n − λn +(kn +1 )2 +� +/8 → 0. +With the assumptions with kn +1 , kn +2 , and kn +3 , we would have +λn +λ∗n − λn +(kn +2 )i → 0, +λn +λ∗n − λn +kn +1 (kn +2 )j → 0, +λn +λ∗n − λn +(kn +1 )2 → 0 +for any 3 ≤ i ≤ 4 and 1 ≤ j ≤ 2. If we denote kn +3 → k3, by combining all the above results we +achieve the following system of equations +−k3 +3/6 + k3/2 = 0, k4 +3/24 − k2 +3/4 + 1/8 = 0, +which does not admit a solution, a contradiction. Hence, Case 2.1.1.2 cannot hold. +31 + +Case 2.1.2: +M′ +n = |∆vn| for all n (by the subsequence argument). From (18), we would have +|∆vn − ∆v∗ +n|/|∆vn| → 0, i.e., ∆vn/∆v∗ +n → 1, and |∆µn − ∆µ∗ +n|2/|∆vn| → 0. 
The argument under +this case is rather similar to that of Case 2.1; therefore, we only sketch the key steps. By using the +result that A1 → 0 and A2 → 0, we would obtain that +(λ∗ +n − λn)(∆µ∗ +n)4 + λn(∆µn − ∆µ∗ +n)4 +24Q(Gn, G∗,n) +→ +(λ∗ +n − λn)∆µn∆µ∗ +n +� +(∆µn)2 − 3∆µn∆µ∗ +n + 3(∆µ∗ +n)2 +� +24Q(Gn, G∗,n) +, +(λ∗ +n − λn)(∆v∗ +n)2 + λn(∆vn − ∆v∗ +n)2 +8Q(Gn, G∗,n) +→ +(λ∗ +n − λn)∆µ∗ +n +� +∆µn∆vn − ∆µ∗ +n∆vn − ∆un∆v∗ +n +� +8Q(Gn, G∗,n) +, +λn(∆µn − ∆µ∗ +n)2(∆vn − ∆v∗ +n) +4Q(Gn, G∗,n) +→ +(λ∗ +n − λn) +� +∆µn∆µ∗ +n∆v∗ +n − ∆µn∆µ∗ +n∆vn + ∆vn∆v∗ +n +� +4Q(Gn, G∗,n) +. +As F4 → 0, we equivalently have +A4 +:= +(λ∗ +n − λn) +�∆µn∆µ∗ +n +� +(∆µn)2 − 3∆µn∆µ∗ +n + 3(∆µ∗ +n)2 +� +24Q(Gn, G∗,n) ++ +∆µn∆µ∗ +n∆vn − 2(∆µ∗ +n)2∆vn − ∆µn∆µ∗ +n∆v∗ +n + ∆vn∆v∗ +n +8Q(Gn, G∗,n) +� +→ 0. +Under Case 2.1.2, we only consider the setting when (∆µn)2/∆vn → 0 as other settings of this +term can be argued in the similar fashion as that of Case 2.1.2. Since |∆µn − ∆µ∗ +n|2/|∆vn| → 0, +we have (∆µ∗ +n)/∆vn → 0. As ∆vn/∆v∗ +n → 1, we also further have that (∆µ∗ +n)2/∆v∗ +n → 0 and +(∆µn)2/∆vn → 0. Therefore, we have ∆µn∆µ∗ +n/∆vn → 0 and ∆µn∆µ∗ +n/∆v∗ +n → 0. Now, from the +formulation of Q(Gn, G∗,n), we achieve +(λ∗ +n − λn)|∆µn∆µ∗ +n|2/Q(Gn, G∗,n) ≤ |∆µn|2/|∆v∗ +n|2 → 0, +(λ∗ +n − λn)|∆vn(∆µ∗ +n)2|/Q(Gn, G∗,n) ≤ |∆µ∗ +n|2/|∆v∗ +n| → 0, +(λ∗ +n − λn)|∆v∗ +n(∆µn)2|/Q(Gn, G∗,n) ≤ |∆µn|2/|∆vn| → 0, +(λ∗ +n − λn)|∆µn|3|∆µ∗ +n|/Q(Gn, G∗,n) ≤ |∆µn||∆µ∗ +n|/|∆v∗ +n| → 0, +(λ∗ +n − λn)|∆µn||∆µ∗ +n|3/Q(Gn, G∗,n) ≤ |∆µn||∆µ∗ +n|/|∆vn| → 0. +Combining these results with A4 → 0, we achieve (λ∗ +n − λn)∆vn∆v∗ +n/Q(Gn, G∗,n) → 0. From here, +we can easily verify that all the results in (22) hold. Thus, by using the same argument as that of +Case 2.1.1, we would get Q(Gn, G∗,n)/Q(Gn, G∗,n) → 0, a contradiction. As a consequence, Case +2.1.2 cannot happen. +Case 2.2: +λ∗ +n/λn → ∞. Remind that M′ +n = max +� +|∆µn|2, |∆vn|, |∆µ∗ +n|2, |∆v∗ +n| +� +. We can verify +that Q(Gn, G∗,n) ≲ λ∗ +n(M′ +n)4. By dividing both the numerator and the denominator of A1 and A2 +respectively by λ∗ +n(M′ +n)1/2 and λ∗ +nM′ +n, given that the new denominators go to 0 we would obtain +32 + +the new numerators also go to 0, i.e., we have the following results +λn∆µn +n/ +� +λ∗ +n(M′ +n)1/2� +− ∆µ∗ +n/(M′ +n)1/2 → 0, +� +(λ∗ +n − λn)∆µn∆µ∗ +n + λn∆vn − λ∗ +n∆v∗ +n +� +/ +� +λ∗ +nM′ +n +� +→ 0. +Since λn/λ∗ +n → 0, the first limit implies that ∆µ∗ +n/M′ +n → 0. Combining this result with the second +limit, we obtain ∆v∗ +n/M′ +n → 0. Therefore, we would have M′ +n = max +� +|∆µn|2, |∆vn| +� +. Without +loss of generality, we assume that M′ +n = |∆µn|2 as the argument for other possibility of M′ +n can be +argued in the similar fashion. With these assumptions, |∆vn − ∆v∗ +n|/|∆µn|2 ̸→ ∞, i.e., as n is large +enough we get |∆vn − ∆v∗ +n| ≲ |∆µn|2. Now, we have two distinct cases +Case 2.2.1: +λ∗ +n max +� +|∆µ∗ +n|2, |∆v∗ +n| +� +/(λn|∆µn|2) → ∞. Due to this assumption, we can check +that as n is large enough, Q(Gn, G∗,n) ≍ λ∗ +n|∆µn|2 max +� +|∆µ∗ +n|2, |∆v∗ +n| +� +. If max +� +|∆µ∗ +n|2, |∆v∗ +n| +� += +|∆µ∗ +n|2 for all n, then by dividing both the numerator and denominator of A1 by λ∗ +n∆µ∗ +n, given that +the new denominator of A1 goes to 0, its new numerator must go to 0, i.e., we have +λn∆µn/(λ∗ +n∆µ∗ +n) → 1, +which cannot hold since λ|∆µn|2/(λ∗ +n|∆µ∗ +n|2) → 0 (assumption of Case 2.2.1) and |∆µn|/|∆µ∗ +n| → ∞. +Therefore, we must have max +� +|∆µ∗ +n|2, |∆v∗ +n| +� += |∆v∗ +n| for all n. 
By dividing both the numerator +and denominator of A2 by λ∗ +n∆v∗ +n, as the new denominator of A2 goes to 0, we would have +(λ∗ +n − λn)∆µn∆µ∗ +n +λ∗n∆v∗n ++ λn∆vn +λ∗n∆v∗n +− 1 → 0. +Since λn|∆vn| +λ∗n|∆v∗n| ≤ λn|∆µn|2 +λ∗n|∆v∗n| → 0 and (λ∗ +n−λn)/λ∗ +n → 1, the above limit shows that ∆µn∆µ∗ +n/∆v∗ +n → +1. Since (∆µn)2/|∆v∗ +n| → ∞, it implies that (∆µ∗ +n)2/∆v∗ +n → 0. Now, by combing the result that +A1 → 0 and A2 → 0, since F3 → 0, we can verify that it is equivalent to +A3 := +� +(λ∗ +n − λn)∆µn∆µ∗ +n(∆µn − 2∆µ∗ +n) +� +/3 + (λ∗ +n − λn)∆µ∗ +n∆vn +Q(Gn, G∗,n) +→ 0. +By dividing both the numerator and the denominator of A3 by λ∗ +n∆µn∆v∗ +n, we obtain +� +(λ∗ +n − λn)∆µn∆µ∗ +n(∆µn − 2∆µ∗ +n) +� +/3 + (λ∗ +n − λn)∆µ∗ +n∆vn +λ∗n∆µn∆v∗n +→ 0. +As (∆µ∗ +n)2/∆v∗ +n → 0 and ∆µn∆µ∗ +n/∆v∗ +n → 1, the above limit leads to ∆µ∗ +n∆vn/(∆µn∆v∗ +n) → −1/3. +Now, by studying A4 → 0 with the assumption that Q(Gn, G∗,n) ≍ λ∗ +n|∆µn|2|∆v∗ +n|, we eventually +get the equation 1/24 − 1/12 = 0, which is a contradiction. Therefore, Case 2.2.1 cannot hold. +33 + +Case 2.2.2: +λ∗ +n max +� +|∆µ∗ +n|2, |∆v∗ +n| +� +/λn|∆µn|2 ̸→ ∞. Therefore, as n is large enough, we would +have λ∗ +n max +� +|∆µ∗ +n|2, |∆v∗ +n| +� +≲ (λn|∆µn|2). Hence, we achieve under this case that Q(Gn, G∗,n) ≍ +λn|∆µn|4. Denote ∆µ∗ +n = ln +1 ∆µn, ∆vn = ln +2 (∆µn)2, and ∆v∗ +n = ln +3 (∆µn)2. From the assumptions of +Case 2.2.2, we would have ln +1 → 0 and ln +3 → 0 while ln +2 ̸→ ∞. Additionally, λ∗ +n max +� +(ln +1 )2, |ln +3 | +� +/λn ̸→ +0. By dividing the numerators and denominators of Ai by λn(∆µn)i for 1 ≤ i ≤ 3, we achieve the +following system of limits +λ∗ +nln +1 +λn +− 1 → 0, (λ∗ +n − λn)ln +1 +λn ++ ln +2 − λ∗ +nln +3 +λn +→ 0, λ∗ +n − λn +λn +�ln +1 − (ln +1 )2 +3 ++ ln +1ln +2 +� +→ 0. +(25) +As ln +1 → 0, the first limit in the above system implies that λ∗ +n(ln +1 )2/λn → 0. If we have max +� +(ln +1 )2, |ln +2 | +� += +|ln +1 |2 for all n, the previous result would mean that λ∗ +nln +3 /λn → 0. Therefore, the second limit in +(25) demonstrates that ln +2 → −1. However, plugging these results to the third limit in this system +would yield 1/3 − 1 = 0, which is a contradiction. Hence, we must have max +� +(ln +1 )2, |ln +2 | +� += |ln +3 | for +all n. Under this setting, by denoting λ∗ +nln +3 +λn +→ a as n → ∞, the first and second limit in (25) leads +to ln +2 → a − 1. With this result, the third limit in this system shows that a = 2/3. With these +results, by dividing both the numerator and denominator of A4 by λn(∆µn)4, we quickly achieve +the equation 1/24 − 5/72 = 0, which is a contradiction. Therefore, Case 2.2.2 cannot hold. +In sum, not all the coefficients of ∂|β|f +∂µβ (x|µ∗ +n, v∗ +n) as 1 ≤ |β| ≤ 8 go to 0. From here, by using +the same argument as that of Proposition 2 and Proposition 3, we achieve the result of part (b) of +the proposition. As a consequence, we reach the conclusion of the theorem. +B +Proofs for convergence rates and minimax lower bounds +In this appendix, we provide the proofs for the convergence rates of the MLE as well as the corre- +sponding minimax lower bounds introduced in Section B. +B.1 +Proof of Theorem 5 +(a) For any G1 = G1(λ1, µ1, Σ1) and G2 = G2(λ2, µ2, Σ2), we denote the following distance +d1(G1, G2) += +λ1||(µ1, Σ1) − (µ2, Σ2)||, +d2(G1, G2) += +|λ1 − λ2|2. +Even though d2(G1, G2) is a proper distance, it is clear that d(G1, G2) is not symmetric and only +satisfies a weak triangle inequality, i.e. we have +d1(G1, G3) + d1(G2, G3) ≥ min {d1(G1, G2), d1(G2, G1)} . 
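To make the lack of symmetry concrete, note that d1(G1, G2) only involves the weight of its first argument. The toy numerical check below (Σ suppressed, scalar µ, arbitrarily chosen numbers, purely for illustration) exhibits the asymmetry and the displayed weak triangle inequality:

def d1(G_a, G_b):
    # d1(G_a, G_b) = lambda_a * |theta_a - theta_b|: only the first argument's weight enters
    lam_a, theta_a = G_a
    _, theta_b = G_b
    return lam_a * abs(theta_a - theta_b)

G1, G2, G3 = (0.1, 0.0), (0.9, 1.0), (0.5, 0.4)
print(d1(G1, G2), d1(G2, G1))                                   # 0.1 versus 0.9: not symmetric
print(d1(G1, G3) + d1(G2, G3) >= min(d1(G1, G2), d1(G2, G1)))   # weak triangle inequality: True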
+Therefore, we will utilize the modification of Le Cam method for nonsymmetric loss in Lemma 6.1 +of [12] to deal with such distance. We start with the following proposition +Proposition 5. Given that f satisfies assumption (S.1) in Theorem 5, we achieve for any r < 1 +that +(i) lim +ǫ→0 +inf +G1=(λ,µ1,Σ1),G2=(λ,µ2,Σ2) {h(pG1, pG2)/dr +1(G1, G2) : d1(G1, G2) ≤ ǫ} = 0. +34 + +(ii) lim +ǫ→0 +inf +G1=(λ1,µ,Σ),G2=(λ2,µ,Σ) {h(pG1, pG2)/dr +2(G1, G2) : d2(G1, G2) ≤ ǫ} = 0. +Proof. (i) For any sequences G1,n = (λn, µ1,n, Σ1,n) and G2,n = (λn, µ2,n, Σ2,n), we have +h2(pG1,n, pG2,n) +≤ +1 +λn +� (pG1,n(x) − pG2,n(x))2 +f(x|µ2,n, Σ2,n) +dx += +λn +� (f(x|µ1,n, Σ1,n) − f(x|µ2,n, Σ2,n))2 +f(x|µ2,n, Σ2,n) +dx +where the first inequality is due to +� +pG1,n(x) + +� +pG2,n(x) > +� +λnf(x|µ2,n, Σ2,n). By Taylor expan- +sion up to the first order, we have +f(x|µ1,n, Σ1,n) − f(x|µ2,n, Σ2,n) = +� +|α|=1 +(µ1,n − µ2,n)α1(Σ1,n − Σ2,n)α2 +α1!α2! +∂f +∂µα1∂Σα2 (x|µ2,n, Σ2,n) ++ +� +|α|=1 +(µ1,n − µ2,n)α1(Σ1,n − Σ2,n)α2 +α1!α2! +1 +� +0 +∂f +∂µα1∂Σα2 (x|µ2,n + t(µ1,n − µ2,n), Σ2,n + t(Σ1,n − Σ2,n))dt +Now, by choosing λ1−2r +n +∥(µ1,n, Σ1,n) − (µ2,n, Σ2,n)∥2−2r → 0, and ∥(µ1,n, Σ1,n) − (µ2,n, Σ2,n)∥ → 0 +and using condition (S.1), we can easily verify that h(pG1,n, pG2,n)/dr +1(G1,n, G2,n) → 0. Therefore, +we achieve the conclusion of part (i). +(ii) The argument for this part is essentially similar to that in part (i). In fact, for any two sequences +G′ +1,n = (λ1,n, µn, Σn) and G′ +2,n = (λ2,n, µn, Σn), we also obtain +h2(pG′ +1,n, pG′ +2,n) +d2r +2 (G′ +1,n, G′ +2,n) +≤ +(λ1,n − λ2,n)2−2r +(1 − λ1,n) ∧ λ1,n +� (h0(x|µ0, Σ0) − f(x|µn, Σn))2 +h0(x|µ0, Σ0) + f(x|µn, Σn) dx +≤ +2(λ1,n − λ2,n)2−2r +(1 − λ1,n) ∧ λ1,n +By choosing (λ1,n−λ2,n)2−2r/ {(1 − λ1,n) ∧ λ1,n} → 0, we also achieve the conclusion of part (ii). +Now, given G∗ = (λ∗, µ∗, Σ∗) and r < 1. Let C0 be any fixed constant. According to part +(i) of Proposition 5, for any sufficiently small ǫ > 0, there exists G′ +∗ = (λ∗, µ∗ +1, Σ∗ +1) such that +d1(G∗, G′ +∗) = d1(G′ +∗, G∗) = ǫ and h(pG∗, pG′∗) ≤ C0ǫr. By means of Lemma 6.1 of [12], we achieve +inf +�Gn∈Ξ +sup +G∈Ξ +EpG +� +λ2∥(�µn, �Σn) − (µ, Σ)∥2 +� +≥ ǫ2 +2 +� +1 − V (pn +G∗, pn +G′∗) +� +. +where pn +G∗ denotes the density of the n-iid sample X1, . . . , Xn. From there, +V (pn +G∗, pn +G′∗) +≤ +h(pn +G∗, pn +G′∗) += +� +1 − +� +1 − h2(pG∗, pG′∗) +�n +≤ +� +1 − (1 − C2 +0ǫ2r)n. +35 + +Hence, we obtain +inf +� +Gn∈Ξ +sup +G∈Ξ +EpG +� +λ2∥(�µn, �Σn) − (µ, Σ)∥2 +� +≥ ǫ2 +2 +� +1 − (1 − C2 +0ǫ2r)n. +By choosing ǫ2r = +1 +C2 +0n, we achieve +inf +�Gn∈Ξ +sup +G∈Ξ +EpG +� +λ2∥(�µn, �Σn) − (µ, Σ)∥2 +� +≥ c1n−1/r. +for any r < 1 where c1 is some positive constant. Using the similar argument, with the result of (ii) +in Proposition 5 we also immediately obtain the result +inf +� +Gn∈Ξ +sup +G∈Ξ +EpG +� +|�λn − λ|2 +� +≥ c2n−1/r. As a +consequence, we reach the conclusion of part (a) of the theorem. +(b) The proof of this part is a direct consequence of Theorem 2 and Theorem 1. +Indeed, for +�Gn = (�λn, �µn, �Σn) being the MLE as in equation (3), we have +EpG∗ +� +|ˆλn − λ∗| + λ∗∥(�µn, �Σn) − (µ∗, Σ∗)∥ +� Thm 2 +≲ +EpG∗V (p � +Gn, pG∗) ≤ EpG∗h(p � +Gn, pG∗) +Thm 1 +≲ +log n +√n +Because all inequalities are uniform in G∗, we achieve the conclusion of part (b) of the theorem. +B.2 +Proof of Theorem 6 +(a) Similar to the proof argument of part (a) of Theorem 5, we define +d3(G1, G2) += +λ1∥(∆µ1, ∆Σ1)∥∥(µ1, Σ1) − (µ2, Σ2)∥, +d4(G1, G2) += +|λ1 − λ2|∥(∆µ1, ∆Σ1)∥2. 
+for any G1 = G1(λ1, µ1, Σ1) and G2 = G2(λ2, µ2, Σ2). It is clear that both d3(G1, G2) and d4(G1, G2) +still satisfy weak triangle inequality. To achieve the conclusion of this part, it suffices to demonstrate +the following results +(i) There exists two sequences G1,n = (λn, µ1,n, Σ1,n) ∈ Ξ1(ln) and G2,n = (λn, µ2,n, Σ2,n) ∈ +Ξ1(ln) such that d3(G1,n, G2,n) → 0 and h(pG1,n, pG2,n)/dr +3(G1,n, G2,n) as n → ∞. +(ii) There exists two sequences G′ +1,n = (λ1,n, µn, Σn) ∈ Ξ1(ln) and G′ +2,n = (λ2,n, µn, Σn) ∈ Ξ1(ln) +such that d4(G1,n, G2,n) → 0 and h(pG′ +1,n, pG′ +2,n)/dr +4(G1,n, G2,n) as n → ∞. +for any r < 1. The proof argument for the above results can proceed in a similar fashion as that of +Proposition 5; therefore, it is omitted. We achieve the conclusion of part (a) of the theorem. +(b) Combining the result of Theorem 3 and the fact that D(G, G∗) ≍ D(G, G∗) for any G and G∗, +we immediately achieve the following convergence rates +sup +G∗∈Ξ +EpG∗ +� +(λ∗)2∥(∆µ∗, ∆Σ∗)∥2∥(�µn, �Σn) − (µ∗, Σ∗)∥2 +� +≲ log2 n +n +, +sup +G∗∈Ξ +EpG∗ +� +∥(∆�µn, ∆�Σn)∥2∥(∆µ∗, ∆Σ∗)∥2|�λn − λ∗|2 +� +≲ log2 n +n +. +(26) +36 + +It is clear that the second result in (26) does not match with the second result in the conclusion +of part (b) of the theorem. To circumvent this issue, we utilize the fact that G∗ ∈ Ξ1(ln). Indeed, +notice that (�µn, �Σn) − (µ∗, Σ∗) = (∆�µn, ∆�Σn) − (∆µ∗, ∆Σ∗), we have +sup +G∗∈Ξ +EpG∗ +���(∆�µn, ∆�Σn) − (∆µ∗, ∆Σ∗) +��� +2 +∥(∆µ∗, ∆Σ∗)∥2 +≲ +log2 n +n(λ∗)2∥(∆µ∗, ∆Σ∗)∥4 → 0. +(27) +Hence, by the AM-GM inequality, we have +EpG∗∥(∆�µn, ∆�Σn)∥2(�λn − λ∗)2 +≥ 1 +2∥(∆µ∗, ∆Σ∗)∥2EpG∗(�λn − λ∗)2 − EpG∗∥(∆�µn, ∆�Σn) − (∆µ∗, ∆Σ∗)∥2(�λn − λ∗)2 += 1 +2∥(∆µ∗, ∆Σ∗)∥2 + + +EpG∗(�λn − λ∗)2 − +EpG∗ +���(∆�µn, ∆�Σn) − (∆µ∗, ∆Σ∗) +��� +2 +(�λn − λ∗)2 +∥(∆µ∗, ∆Σ∗)∥2 + + + +≳ ∥(∆µ∗, ∆Σ∗)∥EpG∗(�λn − λ∗)2, +(28) +uniformly in G∗, where in the last inequality we use (27) combining with the fact that |�λn − λ∗| is +uniformly bounded by 2. Hence, +EpG∗ +� +∥(∆µ∗, ∆Σ∗)∥4|�λn − λ∗|2 +� +≲ EpG∗ +� +∥(∆�µn, ∆�Σn)∥2∥(∆µ∗, ∆Σ∗)∥2|�λn − λ∗|2 +� +≲ log2(n) +n +, +which is the conclusion of the theorem. +B.3 +Proof of Theorem 7 +(a) Similar to the proof argument of part (a) of Theorem 5, we define +d5(G1, G2) += +λ1∥(µ1, Σ1) − (µ2, Σ2)∥4, +d6(G1, G2) += +|λ1 − λ2|∥(∆µ1, ∆Σ1)∥4. +for any G1 = G1(λ1, µ1, Σ1) and G2 = G2(λ2, µ2, Σ2). It is clear that d6(G1, G2) satisfies weak +triangle inequality while d5(G1, G2) no longer satisfies weak triangle inequality. In particular, we +have +d5(G1, G3) + d5(G2, G3) ≥ min {d5(G1, G2), d5(G2, G1)} +8 +. +A close investigation of Lemma 6.1 of [12] reveals that modified Le Cam method still works under +this setting of d5 metric. More specifically, for any ǫ > 0 the following holds +inf +� +Gn∈Ξ +sup +G∈Ξ2(ln) +EpG +� +d2 +5(G, �Gn) +� +≥ ǫ2 +128 +� +1 − V (pn +G1, pn +G2) +� +where G1, G2 ∈ Ξ2(ln) such that d5(G1, G2)∧d5(G1, G2) ≥ ǫ/4. From here, to achieve the conclusion +of part (a), it suffices to demonstrate for any r < 1 that +37 + +(i) There exists two sequences G1,n = (λn, µ1,n, Σ1,n) ∈ Ξ2(ln) and G2,n = (λn, µ2,n, Σ2,n) ∈ +Ξ1(ln) such that d5(G1,n, G2,n) → 0 and h(pG1,n, pG2,n)/dr +5(G1,n, G2,n) as n → ∞. +(ii) There exists two sequences G′ +1,n = (λ1,n, µn, Σn) ∈ Ξ2(ln) and G′ +2,n = (λ2,n, µn, Σn) ∈ Ξ1(ln) +such that d6(G1,n, G2,n) → 0 and h(pG′ +1,n, pG′ +2,n)/dr +6(G1,n, G2,n) as n → ∞. +Following the proof argument of Proposition 5, we can quickly verify the above results. +As a +consequence, we reach the conclusion of part (a) of the theorem. 
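Before moving to part (b), we record one way to justify the factor 8 in the weak triangle inequality for d5 displayed above. Writing θi = (µi, Σi) and using only the triangle inequality together with the convexity bound (u + v)^4 ≤ 8(u^4 + v^4), we have

min{d5(G1, G2), d5(G2, G1)} = min{λ1, λ2} ∥θ1 − θ2∥^4 ≤ min{λ1, λ2} (∥θ1 − θ3∥ + ∥θ2 − θ3∥)^4 ≤ 8 (λ1∥θ1 − θ3∥^4 + λ2∥θ2 − θ3∥^4) = 8 (d5(G1, G3) + d5(G2, G3)).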
(b) From the discussion after Theorem 3, we can show that:

Q(G, G∗) ≍ |λ − λ∗| (∥∆µ∥^2 ∥∆Σ∥)(∥∆µ∗∥^2 ∥∆Σ∗∥) + (∥µ − µ∗∥^2 + ∥Σ − Σ∗∥) (λ(∥∆µ∥^2 + ∥∆Σ∥) + λ∗(∥∆µ∗∥^2 + ∥∆Σ∗∥)).

Hence, combining Theorem 4 with Theorem 1, we have

sup_{G∗} E_{pG∗} (λ∗)^2 (∥µ̂n − µ∗∥^4 + ∥Σ̂n − Σ∗∥^2)(∥∆µ∗∥^4 + ∥∆Σ∗∥^2) ≲ log^2(n)/n,
sup_{G∗} E_{pG∗} |λ̂n − λ∗|^2 (∥∆µ̂n∥^4 ∥∆Σ̂n∥^2)(∥∆µ∗∥^4 ∥∆Σ∗∥^2) ≲ log^2(n)/n.

Similarly to the proof of Theorem 6, and using the definition of Ξ2(ln), we have

E_{pG∗} |λ̂n − λ∗|^2 (∥∆µ̂n∥^4 ∥∆Σ̂n∥^2) ≳ (∥∆µ∗∥^4 ∥∆Σ∗∥^2) E_{pG∗} |λ̂n − λ∗|^2

uniformly in G∗ ∈ Ξ2(ln). Hence,

sup_{G∗ ∈ Ξ2(ln)} E_{pG∗} |λ̂n − λ∗|^2 (∥∆µ∗∥^8 ∥∆Σ∗∥^4) ≲ log^2(n)/n.

As a consequence, we obtain the conclusion of the theorem.

C Proofs for auxiliary results

Lemma 1. For any r ≥ 1, we define

D_r(G, G∗) = λ∥(∆µ, ∆Σ)∥^r + λ∗∥(∆µ∗, ∆Σ∗)∥^r − min{λ, λ∗} (∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r − ∥(µ, Σ) − (µ∗, Σ∗)∥^r),

for any G and G∗. Then, W_r^r(G, G∗) ≍ D_r(G, G∗) for any r ≥ 1, where W_r denotes the Wasserstein distance of order r.

Proof. Without loss of generality, we assume throughout the lemma that λ < λ∗. Therefore, we obtain from the formulation of D_r(G, G∗) that

D_r(G, G∗) = (λ∗ − λ)∥(∆µ∗, ∆Σ∗)∥^r + λ∥(µ, Σ) − (µ∗, Σ∗)∥^r.

Direct computation of W_r^r(G, G∗) yields three distinct cases:

Case 1: If ∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r ≥ ∥(µ, Σ) − (µ∗, Σ∗)∥^r, then

W_r^r(G, G∗) = λ∥(∆µ, ∆Σ)∥^r + λ∗∥(∆µ∗, ∆Σ∗)∥^r − min{λ, λ∗} (∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r − ∥(µ, Σ) − (µ∗, Σ∗)∥^r) = D_r(G, G∗).

Case 2: If ∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r < ∥(µ, Σ) − (µ∗, Σ∗)∥^r and λ + λ∗ ≤ 1, then

W_r^r(G, G∗) = λ∥(∆µ, ∆Σ)∥^r + λ∗∥(∆µ∗, ∆Σ∗)∥^r = (λ∗ − λ)∥(∆µ∗, ∆Σ∗)∥^r + λ(∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r).

From the triangle inequality together with the elementary bound (a + b)^r ≤ 2^{r−1}(a^r + b^r), we have ∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r ≳ ∥(µ, Σ) − (µ∗, Σ∗)∥^r. Therefore, under Case 2 we have ∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r ≍ ∥(µ, Σ) − (µ∗, Σ∗)∥^r, which directly implies that W_r^r(G, G∗) ≍ D_r(G, G∗).

Case 3: If ∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r < ∥(µ, Σ) − (µ∗, Σ∗)∥^r and λ + λ∗ > 1, then

W_r^r(G, G∗) = (1 − λ∗)∥(∆µ, ∆Σ)∥^r + (1 − λ)∥(∆µ∗, ∆Σ∗)∥^r + (λ + λ∗ − 1)∥(µ, Σ) − (µ∗, Σ∗)∥^r = (λ∗ − λ)∥(∆µ∗, ∆Σ∗)∥^r + (1 − λ∗)(∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r) + (λ∗ + λ − 1)∥(µ, Σ) − (µ∗, Σ∗)∥^r.

Since ∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r ≍ ∥(µ, Σ) − (µ∗, Σ∗)∥^r, we achieve

(1 − λ∗)(∥(∆µ, ∆Σ)∥^r + ∥(∆µ∗, ∆Σ∗)∥^r) ≍ (1 − λ∗)∥(µ, Σ) − (µ∗, Σ∗)∥^r.

Therefore, we also have W_r^r(G, G∗) ≍ D_r(G, G∗) under Case 3.

Combining the results from these cases, we reach the conclusion of the lemma.
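As a purely illustrative companion to the rates established above (and not part of any proof), one can simulate the MLE in equation (3) for a one-dimensional deviated model and track the loss appearing in Theorem 5. In the rough sketch below, the null density h0 (a standard Laplace, taken as a plausibly distinguishable null), the true parameter values, the optimizer, and the sample sizes are all arbitrary assumptions made only for illustration; the script assumes numpy and scipy are available.

import numpy as np
from scipy.optimize import minimize
from scipy.stats import laplace, norm

rng = np.random.default_rng(0)

# Deviated model (1 - lam) * h0 + lam * N(mu, v) with a standard Laplace null h0;
# all concrete choices here are illustrative assumptions only.
lam_true, mu_true, v_true = 0.3, 1.0, 2.0

def neg_log_lik(params, x):
    lam, mu, log_v = params
    sd = np.exp(0.5 * log_v)
    mix = (1.0 - lam) * laplace.pdf(x) + lam * norm.pdf(x, mu, sd)
    return -np.sum(np.log(mix + 1e-300))

for n in [500, 2000, 8000, 32000]:
    is_dev = rng.random(n) < lam_true
    x = np.where(is_dev, rng.normal(mu_true, np.sqrt(v_true), n), rng.laplace(0.0, 1.0, n))
    res = minimize(neg_log_lik, x0=[0.5, 0.0, 0.0], args=(x,),
                   bounds=[(1e-3, 1 - 1e-3), (-5.0, 5.0), (-5.0, 5.0)], method="L-BFGS-B")
    lam_hat, mu_hat, v_hat = res.x[0], res.x[1], np.exp(res.x[2])
    # Loss from Theorem 5: |lam_hat - lam*| + lam* * ||(mu_hat, v_hat) - (mu*, v*)||
    loss = abs(lam_hat - lam_true) + lam_true * np.hypot(mu_hat - mu_true, v_hat - v_true)
    print(f"n = {n:6d}   loss = {loss:.4f}   sqrt(n) * loss = {np.sqrt(n) * loss:.2f}")

With these fixed, non-degenerate choices of λ∗ and (µ∗, v∗), one should see the printed loss decay at roughly the n^{-1/2} rate, i.e., sqrt(n) * loss staying of the same order up to logarithmic factors.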
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content='ST] 27 Jan 2023 Optimal Rate for Parameter Estimation in Matrix-variate Deviated Models Nhat Ho† Dat Do⋄ Huy Nguyen† Khai Nguyen† University of Texas, Austin†;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' University of Michigan, Ann Arbor⋄ January 30, 2023 Abstract We study the maximum likelihood estimation (MLE) in the matrix-variate deviated model where the data are generated from the density function (1 − λ∗)h0(x) + λ∗f(x|µ∗, Σ∗) where h0 is a known function, λ∗ ∈ [0, 1] and (µ∗, Σ∗) are unknown parameters to estimate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' The main challenges in deriving the convergence rate of the MLE mainly come from two issues: (1) The interaction between the function h0 and the density function f;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' (2) The deviated proportion λ∗ can go to the extreme points of [0, 1] as the sample size goes to infinity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' To address these challenges, we develop the distinguishability condition to capture the linear independent relation between the function h0 and the density function f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' We then provide comprehensive convergence rates of the MLE via the vanishing rate of λ∗ to 0 as well as the distinguishability of h0 and f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' 1 Introduction The goodness-of-fit test [9] is one of the foundational tools in statistics with several applications in data-driven scientific fields, namely kernel Stein discrepacy [22, 26], point processes [31] and Bayesian statistics [27], etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' Given a sample set of data and a pre-specified distribution with density function h0, the test indicates whether the samples are reasonably distributed according to h0 (null hypothesis) or to another family of distributions {p(·|θ) : θ ∈ Θ} (alternative hypothesis).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' It is worth noting that knowledge about the null hypothesis distribution can come from prior knowledge of scientists.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' A key to understand the statistical efficiency of testing is via the likelihood ratio and the maximum likelihood estimation (MLE) methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' [4].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' While traditional testing problems often assume the null distribution h0 = p(·|θ0) and the alternative one p(·|θ) are from a single simple family of distributions such as exponential families, it is also necessary to comprehend the statistical properties of a testing problem in which the alternative f(·|θ) can be deviated from h0 by a distribution from a potentially different family.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' Specifically, in this paper, we consider the family of distributions named matrix-variate deviated model with density functions defined as follows: pG(x) := (1 − λ)h0(x) + λf(x|µ, Σ), (1) where G := (λ, µ, Σ) are the model’s parameters with λ ∈ [0, 1] being the deviated proportion (from h0) and (µ, Σ) ∈ Θ × Ω are parameters of a vector-matrix family of distributions f, where Θ ⊂ Rd1 and Ω ⊂ Rd2×d2 being compact.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' When λ = 0, this recovers the null hypothesis distribution h0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' As the core of the hypothesis testing is studied via the MLE of our model under the alternative hypothesis, we directly investigate the behavior of the MLE of the deviated model type in the paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' 1 Problem setting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' Suppose that we observe n i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' samples X1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' , Xn from the true matrix- variate deviated model: pG∗(x) := (1 − λ∗)h0(x) + λ∗f(x|µ∗, Σ∗), (2) where G∗ = (λ∗, µ∗, Σ∗) are unknown parameters with λ∗ ̸= 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' Throughout the paper, we allow G∗ to change with the sample size n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFKT4oBgHgl3EQfai5n/content/2301.11808v1.pdf'} +page_content=' To facilitate our presentation, we suppress the dependence of G∗ on n, and then estimate G∗ from the data.' 
The main focus of this paper is to establish both the convergence rate and the minimax rate for parameter estimation via the MLE approach, which is given by

Ĝn ∈ arg max_{G∈Ξ} ∑_{i=1}^{n} log pG(Xi),  (3)

where Ĝn = (λ̂n, µ̂n, Σ̂n) and Ξ := [0, 1] × Θ × Ω.

Contribution. There are two main challenges in studying the convergence rate of the MLE Ĝn: (1) the interaction between the function h0 and the density function f, e.g., h0 is a mixture of densities f and (µ∗, Σ∗) approaches one of the components of h0 as the sample size n goes to infinity; (2) the deviated proportion λ∗ can go to the extreme points of [0, 1] as the sample size goes to infinity. To address these issues, we first develop the distinguishability condition to capture the linear independence relation between the function h0 and the density function f. We then study the optimal convergence rate of parameters under both distinguishable and non-distinguishable settings of the matrix-variate deviated model. Our theoretical results can be summarized as follows:

1. Distinguishable settings: We demonstrate that as long as the function h0 and the density function f are distinguishable, the convergence rate of λ̂n to λ∗ is O(n^{−1/2}), while the convergence rate of (µ̂n, Σ̂n) to (µ∗, Σ∗) is determined by the rate at which λ∗ goes to 0, as follows: λ∗∥(µ̂n, Σ̂n) − (µ∗, Σ∗)∥ = O(n^{−1/2}). This indicates that if λ∗ goes to 0 at a rate slower than n^{−1/2}, the convergence rate of estimating (µ∗, Σ∗) is slower than the parametric rate.

2. Non-distinguishable settings: When h0 and f are not distinguishable, the convergence rates of the MLE become complicated to characterize.
To shed light on some of the behaviors of the MLE under the non-distinguishable settings of the matrix-variate deviated model, we specifically study the simple setting when h0 belongs to the same family as f, namely, h0(·) = f(·|µ0, Σ0) for some (µ0, Σ0). To precisely characterize the rates of the MLE under this setting, we consider the second-order strong identifiability of f, which requires the linear independence of the derivatives of f up to the second order with respect to its parameters. Second-order identifiability has also been considered in the literature to investigate the convergence rate of parameter estimation in finite mixtures [8, 24, 17, 16, 15, 14, 23].

Strongly identifiable and non-distinguishable settings: When f is strongly identifiable in the second order, we demonstrate that ∥(∆µ∗, ∆Σ∗)∥²|λ̂n − λ∗| = O(n^{−1/2}) and λ∗∥(∆µ∗, ∆Σ∗)∥∥(µ̂n, Σ̂n) − (µ∗, Σ∗)∥ = O(n^{−1/2}), where ∆µ = µ − µ0 and ∆Σ = Σ − Σ0. This indicates that the convergence rate of λ̂n to λ∗ depends on that of (µ∗, Σ∗) to (µ0, Σ0), while the convergence rate of (µ̂n, Σ̂n) to (µ∗, Σ∗) depends on both the rate of λ∗ to 0 and the rate of (µ∗, Σ∗) to (µ0, Σ0). These results are strictly different from those in the distinguishable settings, which is mainly due to the non-distinguishability between h0 and f.

Weakly identifiable and non-distinguishable settings: When f is weakly identifiable, i.e., it is not strongly identifiable in the second order, we specifically consider the popular setting when f is the density of a multivariate Gaussian distribution. The loss of the strong identifiability of the Gaussian distribution is due to the partial differential equation (PDE) between the location and scale parameters (cf. equation (9)).
Due to that PDE, the convergence rate of the MLE under this setting exhibits very different behaviors from those under the strongly identifiable setting. In particular, we prove that (∥∆µ∗∥⁸ + ∥∆Σ∗∥⁴)|λ̂n − λ∗|² = O(n^{−1}) and (λ∗)²(∥∆µ∗∥⁴ + ∥∆Σ∗∥²)(∥µ̂n − µ∗∥⁴ + ∥Σ̂n − Σ∗∥²) = O(n^{−1}). Notably, there is a mismatch in the orders of the convergence rates of the location and covariance parameters. Furthermore, the rate of the deviated mixing proportion also depends on different orders of µ∗ to µ0 and Σ∗ to Σ0. Such rich behaviors of the MLE are mainly due to the PDE between the location and scale parameters.

Related work. When f is the density of a location Gaussian distribution, the convergence rate of parameter estimation in the deviated model was studied in the work of [12]. Since the location Gaussian distribution is a special case of a strongly identifiable distribution, our result in the strongly identifiable and non-distinguishable settings is a generalization of the results in [12], but with a different proof technique, as their technique relies strictly on the properties of the location Gaussian distribution. The hypothesis testing problem related to the matrix-variate deviated model has been considered in previous work, including the problem of detecting sparse homogeneous and heteroscedastic mixtures [11, 2, 1, 3, 29], the problem of determining the number of components [6, 21, 7, 18, 20], and the problem of multiple testing [25, 10].

Notations. For any a, b ∈ R, we denote a ∨ b = max{a, b} and a ∧ b = min{a, b}. Next, we say that h0 is identical to f if h0(x) = f(x|µ0, Σ0) for some (µ0, Σ0) ∈ Θ × Ω. For each parameter G ∈ Ξ, let EpG denote the expectation taken with respect to the product measure with density pG.
Lastly, for any two density functions p and q (with respect to the Lebesgue measure m), the Total Variation distance is given by V(p, q) := (1/2) ∫ |p(x) − q(x)| dm(x), while we define the squared Hellinger distance as h²(p, q) := (1/2) ∫ [√p(x) − √q(x)]² dm(x).

2 Preliminary

2.1 Identifiability Condition

Our principal goal in this paper is to assess the statistical efficiency of parameter estimation from the MLE method. To do that, we should be able to guarantee the parameter identifiability of the deviated model (2), i.e., if pG(x) = pG∗(x) for almost surely x ∈ X, where G = (λ, µ, Σ), then G ≡ G∗. That identifiability condition leads to the following notion of distinguishability between the density function h0(·) and the family of density functions {f(·|µ, Σ) : (µ, Σ) ∈ Θ × Ω}.

Definition 1. We say that the family of density functions {f(·|µ, Σ) : (µ, Σ) ∈ Θ × Ω} (or, in short, f) is distinguishable from h0 if the following holds:

A1. For any two distinct components (µ1, Σ1) and (µ2, Σ2), if we have real coefficients ηi for 1 ≤ i ≤ 3 such that η1η2 ≤ 0 and η1f(x|µ1, Σ1) + η2f(x|µ2, Σ2) + η3h0(x) = 0 for almost surely x ∈ X, then η1 = η2 = η3 = 0.

We can verify that as long as f is distinguishable from h0, the parameter identifiability of our matrix-variate deviated model follows. In particular, assume that there exists G = (λ, µ, Σ) such that

(1 − λ∗)h0(x) + λ∗f(x|µ∗, Σ∗) = (1 − λ)h0(x) + λf(x|µ, Σ),  (4)

for almost surely x ∈ X. The above equation is equivalent to (λ − λ∗)h0(x) + λ∗f(x|µ∗, Σ∗) − λf(x|µ, Σ) = 0. Assuming that f is distinguishable from h0, equation (4) indicates that if (µ, Σ) ≠ (µ∗, Σ∗), then we have λ = λ∗ = 0.
Since λ∗ ≠ 0 by our assumption, we obtain that (µ, Σ) = (µ∗, Σ∗). As a result, equation (4) becomes (λ − λ∗)h0(x) + (λ∗ − λ)f(x|µ, Σ) = 0. By applying the distinguishability condition again, we get λ = λ∗. Therefore, the matrix-variate deviated model (2) is identifiable. In the following example, we demonstrate that the distinguishability condition is (or is not) satisfied by several choices of h0 and f.

Example 1. (a) Assume that f is in a location family of density functions, i.e., f(x|µ, Σ) = fΣ(x − µ) for all x, where Σ is a fixed covariance matrix. If h0(x) ≠ f(x) for almost surely x ∈ X, then f is distinguishable from h0.

(b) When h0 is a finite mixture of multivariate Gaussian densities and f belongs to a class of multivariate Student's density functions with any fixed odd degree of freedom ν > 1, we get that f is distinguishable from h0.

(c) When f is identical to h0, then f is not distinguishable from h0.

2.2 Convergence Rate of Density Estimation

Our strategy for obtaining the convergence rate of the MLE Ĝn is to first establish the convergence rate of the density pĜn and then study the geometric inequalities between the parameter space and the density space. For the former, the standard method is to use empirical process theory [13, 28]. For the latter step, we will investigate those inequalities for various settings of distinguishability in Section 3.
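The Total Variation and Hellinger distances defined above, and the two-step strategy of passing from density rates to parameter rates, can be made concrete numerically. The following is a minimal sketch under assumed choices (a univariate Gaussian kernel f and a standard normal h0, neither of which is prescribed by the paper); it simply evaluates V and h between two deviated densities by quadrature on a grid.

```python
import numpy as np
from scipy.stats import norm
from scipy.integrate import trapezoid

# Hypothetical univariate instance of the deviated model, for illustration only:
# h0 = N(0, 1) and f(.|mu, sigma) = N(mu, sigma^2).
def p_G(x, lam, mu, sigma, h0=norm(0.0, 1.0).pdf):
    return (1.0 - lam) * h0(x) + lam * norm.pdf(x, mu, sigma)

def tv_and_hellinger(params1, params2, grid=np.linspace(-20, 20, 200001)):
    """Approximate V(p_G, p_G*) and h(p_G, p_G*) by quadrature on a grid."""
    p = p_G(grid, *params1)
    q = p_G(grid, *params2)
    tv = 0.5 * trapezoid(np.abs(p - q), grid)
    hel2 = 0.5 * trapezoid((np.sqrt(p) - np.sqrt(q)) ** 2, grid)
    return tv, np.sqrt(hel2)

# Two nearby parameters G and G*: both distances shrink together, which is
# exactly what the parameter-to-density inequalities of Section 3 quantify.
print(tv_and_hellinger((0.3, 1.0, 1.0), (0.25, 1.2, 1.0)))
```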
Now we proceed to describe the convergence rate of density estimation under the Hellinger distance and give a general result for the matrix-variate deviated model. This convergence rate can be deduced from the complexity of the set

P^{1/2}_k(Ξ, ε) = { p̄G^{1/2} : G ∈ Ξ, h(p̄G, pG∗) ≤ ε },  (5)

where for any G ∈ Ξ we denote p̄G := (pG + pG∗)/2. We measure the complexity of this class through the bracketing entropy integral

J_B(ε, P^{1/2}_k(Ξ, ε)) = ∫_{ε²/2^{13}}^{ε} H_B^{1/2}(u, P^{1/2}_k(Ξ, ε)) du ∨ ε,  (6)

where H_B(ε, P) denotes the ε-bracketing entropy number of a metric space P. We require the following assumption.

A2. Given a universal constant J > 0, there exists N > 0, possibly depending on Θ and k, such that for all n ≥ N and all ε > (log n/n)^{1/2}, J_B(ε, P^{1/2}_k(Ξ, ε)) ≤ J √n ε².

Theorem 1. Assume that Assumption A2 holds, and let k ≥ 1. Then, there exists a constant C > 0 depending only on Θ and k such that for all n ≥ 1,

sup_{G∗∈Ξ} EpG∗ h(pĜn, pG∗) ≤ C √(log n / n).

Therefore, in order to get the convergence rate for density estimators based on the MLE method, we only need to check Assumption A2, which holds true for several parametric models [28]. For our model, we give an example that it holds for a general class of f and h0.

Proposition 1. Let f be a location-scale Gaussian density function with parameters (µ, Σ) ∈ Θ × Ω. Suppose that there exist positive constants a, ℓ, u such that Θ = [−a, a]^d and the eigenvalues of Σ are bounded in [ℓ, u] for any Σ ∈ Ω.
Additionally, we assume that the function h0 is bounded with tail − log h0(x) ≳ ∥x∥^q for some q > 0. Then, the corresponding matrix-variate deviated model defined in equation (1) satisfies assumption A2.

3 From the Convergence Rate of Densities to Rate of Parameters

The objective of this section is to develop a general theory according to which a small distance between pG and pG∗ under the Hellinger distance (or Total Variation distance) implies that G and G∗ are also close under an appropriate distance, where G = (λ, µ, Σ) and G∗ = (λ∗, µ∗, Σ∗). By combining those results with Theorem 1, we can obtain the convergence rate for parameter estimation (cf. Section 4). The distinguishability condition between h0 and f implicitly requires that pG = pG∗ would entail G = G∗; however, to obtain quantitative bounds for their Total Variation distance, we will need stronger notions of both distinguishability and classical parameter identifiability, ones which involve higher-order derivatives of the densities h0 and f, taken with respect to the mixture model parameters. Throughout the rest of this section, we denote G = (λ, µ, Σ) and G∗ = (λ∗, µ∗, Σ∗).

3.1 Distinguishable Settings

Definition 2. We say that f is distinguishable from h0 up to the first order if f is first-order differentiable in (µ, Σ) and the following holds:

(D.1) For any component (µ′, Σ′) ∈ Θ × Ω, if we have real coefficients η, τα for all α = (α1, α2), α1 ∈ N^{d1}, α2 ∈ N^{d2×d2}, |α| = |α1| + |α2| ≤ 1, such that

η h0(x) + ∑_{|α|≤1} τα ∂^{|α|}f / (∂µ^{α1} ∂Σ^{α2}) (x|µ′, Σ′) = 0 for all x ∈ X,

then η = τα = 0 for all |α| ≤ 1.

We can verify that the examples from part (a) and part (b) of Example 1 satisfy the first-order distinguishability condition.
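Condition (D.1) asks for linear independence of h0 together with f and its first-order derivatives. As a quick sanity check (not a proof), one can evaluate these functions on a grid and inspect the smallest singular value of the resulting matrix: a value bounded away from zero is consistent with linear independence on that grid. The sketch below does this for an assumed univariate pair, a Student-t h0 and a Gaussian f with variance parameter v; these are illustrative choices of the editor, not the matrix-variate setting of the paper.

```python
import numpy as np
from scipy.stats import norm, t as student_t

# Numerical rank heuristic for condition (D.1) in a univariate toy instance.
mu0, v0 = 0.5, 1.5
x = np.linspace(-10, 10, 4001)
eps = 1e-4

def f(xs, mu, v):
    return norm.pdf(xs, loc=mu, scale=np.sqrt(v))

h0 = student_t.pdf(x, df=5)
# Central finite differences stand in for the partial derivatives of f.
df_dmu = (f(x, mu0 + eps, v0) - f(x, mu0 - eps, v0)) / (2 * eps)
df_dv = (f(x, mu0, v0 + eps) - f(x, mu0, v0 - eps)) / (2 * eps)

A = np.column_stack([h0, f(x, mu0, v0), df_dmu, df_dv])
sigma_min = np.linalg.svd(A, compute_uv=False).min()
print(f"smallest singular value on the grid: {sigma_min:.3e}")
```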
Next, we introduce a notion of uniform Lipschitz condition in the following definition.

Definition 3 (Uniform Lipschitz). We say that f admits the uniform Lipschitz condition up to the first order if the following holds: there are positive constants δ1, δ2 such that for any R1, R2, R3 > 0, γ1 ∈ R^{d1}, γ2 ∈ R^{d2×d2}, R1 ≤ λ^{1/2}_{min}(Σ1) ≤ λ^{1/2}_{max}(Σ2) ≤ R2, ∥µ∥ ≤ R3, µ1, µ2 ∈ Θ, Σ1, Σ2 ∈ Ω, we can find positive constants C(R1, R2) and C(R3) such that for all x ∈ X,

|γ1⊤ (∂f/∂µ(x|µ1, Σ) − ∂f/∂µ(x|µ2, Σ))| ≤ C(R1, R2) ∥µ1 − µ2∥^{δ1} ∥γ1∥,

|tr((∂f/∂Σ(x|µ, Σ1) − ∂f/∂Σ(x|µ, Σ2))⊤ γ2)| ≤ C(R3) ∥Σ1 − Σ2∥^{δ2} ∥γ2∥.

Now, equipped with the first-order distinguishability and uniform Lipschitz conditions, we have the following result characterizing the behavior of V(pG, pG∗) with respect to the variation of G and G∗.

Theorem 2. Assume that f is distinguishable from h0 up to the first order. Furthermore, f admits the uniform Lipschitz condition up to the first order. For any G and G∗, we define

K(G, G∗) := |λ − λ∗| + (λ + λ∗)∥(µ, Σ) − (µ∗, Σ∗)∥.

Then, the following holds:

C · K(G, G∗) ≤ V(pG, pG∗) ≤ C1 · K(G, G∗),

for all G and G∗, where C and C1 are two positive constants depending only on Θ, Ω, and h0.

See Appendix A.1 for the proof of Theorem 2. Since the MLE approach yields the convergence rate n^{−1/2}, up to some logarithmic factor, for pG∗ under the first-order uniform Lipschitz condition of f, the result of Theorem 2 directly yields the convergence rate n^{−1/2}, up to some logarithmic factor, for G∗ under the metric K. This entails that the estimation of the weight λ∗ converges at rate n^{−1/2} up to some logarithmic factor, while the convergence rate of estimating (µ∗, Σ∗) is typically much slower than n^{−1/2}, as it depends on the rate of convergence of λ∗ to 0 (cf. Theorem 5).

3.2 Non-distinguishable Settings

When f is not distinguishable from h0 up to the first order, the bound in Theorem 2 may not hold in general.
In this section, we investigate the inverse bounds under specific non-distinguishable settings in the first order, namely when h0 belongs to the family f(·|µ, Σ), i.e., h0(x) = f(x|µ0, Σ0) for some (µ0, Σ0) ∈ Θ × Ω. Our study is divided into two separate regimes of f: the first setting is when f is strongly identifiable in the second order (cf. Definition 4), while the second setting is when it is not. For simplicity of presentation, we define (∆µ, ∆Σ) = (µ − µ0, Σ − Σ0) for any element (µ, Σ) ∈ Θ × Ω.

Definition 4 (Strong Identifiability). We say that f is strongly identifiable in the second order if f is twice differentiable in (µ, Σ) and the following holds:

(D.2) For any positive integer k, given k distinct pairs (µ1, Σ1), . . . , (µk, Σk), if we have coefficients α^{(i)}_η such that

∑_{ℓ=0}^{2} ∑_{|η|=ℓ} ∑_{i=1}^{k} α^{(i)}_η ∂^{|η|}f / (∂µ^{η1} ∂Σ^{η2}) (x|µi, Σi) = 0, for almost all x ∈ X,

then α^{(i)}_η = 0 for all i ∈ [k] and |η| ≤ 2.

3.2.1 Strongly Identifiable Settings

Now, we have the following result regarding the lower bound of V(pG, pG∗) under the strongly identifiable settings of f.
Theorem 3. Assume that h0(x) = f(x|µ0, Σ0) for some (µ0, Σ0) ∈ Θ × Ω, and that f is strongly identifiable in the second order and admits the uniform Lipschitz condition up to the second order. Furthermore, we denote

D(G, G∗) := λ∥(∆µ, ∆Σ)∥² + λ∗∥(∆µ∗, ∆Σ∗)∥² − min{λ, λ∗}(∥(∆µ, ∆Σ)∥² + ∥(∆µ∗, ∆Σ∗)∥²) + (λ∥(∆µ, ∆Σ)∥ + λ∗∥(∆µ∗, ∆Σ∗)∥) ∥(µ, Σ) − (µ∗, Σ∗)∥,

for any G and G∗. Then, there exists a positive constant C depending only on Θ, Ω, and (µ0, Σ0) such that V(pG, pG∗) ≥ C · D(G, G∗) for all G and G∗.

The proof of Theorem 3 is in Appendix A.2. Several remarks regarding Theorem 3 are in order:

(i) For any G and G∗, by defining

D̄(G, G∗) := |λ∗ − λ|∥(∆µ, ∆Σ)∥∥(∆µ∗, ∆Σ∗)∥ + ∥(µ, Σ) − (µ∗, Σ∗)∥ (λ∥(∆µ, ∆Σ)∥ + λ∗∥(∆µ∗, ∆Σ∗)∥),

we can verify that 1/2 ≤ D(G, G∗)/D̄(G, G∗) ≤ 2, i.e., D(G, G∗) ≍ D̄(G, G∗). The reason that we prefer the formulation of D(G, G∗) over that of D̄(G, G∗) is not only the convenience of the proof argument of Theorem 3 later in Appendix A but also its partial connection with the Wasserstein metric, which we discuss in the next remark.

(ii) When f is a multivariate location family and is identical to h0, i.e., µ0 = 0, it was demonstrated recently in [12] that

V(pG, pG∗) ≳ |λ − λ∗|∥µ∥∥µ∗∥ + (λ∗∥µ∗∥ + λ∥µ∥)∥µ − µ∗∥,  (7)

which is also the key result for establishing the convergence rates of parameter estimation in their work. However, their proof technique only works for the location family, and it is unclear what sufficient condition on a family of density functions beyond the location family would ensure that inequality (7) holds.
As the location family is strongly identifiable in the second order, we can verify that the lower bound in Theorem 3 and inequality (7) are in fact similar. Therefore, the result in Theorem 3 gives a generalization of inequality (7) in [12] to the setting where f is strongly identifiable in the second order.

(iii) As indicated in [12], we can further lower bound the right-hand side of inequality (7) in terms of the second-order Wasserstein metric W2 [30] between G and G∗ when we present G and G∗ as two discrete probability measures with two components. In particular, with an abuse of notation, we write G = (1 − λ)δ(µ0,Σ0) + λδ(µ,Σ) and G∗ = (1 − λ∗)δ(µ0,Σ0) + λ∗δ(µ∗,Σ∗), i.e., we think of G and G∗ as two mixing measures with one atom fixed at (µ0, Σ0). In light of Lemma 1 in Appendix C, we have

W_2^2(G, G∗) ≍ λ∥(∆µ, ∆Σ)∥² + λ∗∥(∆µ∗, ∆Σ∗)∥² − min{λ, λ∗}(∥(∆µ, ∆Σ)∥² + ∥(∆µ∗, ∆Σ∗)∥²) + min{λ, λ∗}∥(µ, Σ) − (µ∗, Σ∗)∥².

Therefore, D(G, G∗) and W_2^2(G, G∗) share the similar term λ∥(∆µ, ∆Σ)∥² + λ∗∥(∆µ∗, ∆Σ∗)∥² − min{λ, λ∗}(∥(∆µ, ∆Σ)∥² + ∥(∆µ∗, ∆Σ∗)∥²) in their formulations. However, as λ∥(∆µ, ∆Σ)∥ + λ∗∥(∆µ∗, ∆Σ∗)∥ ≥ min{λ, λ∗}∥(µ, Σ) − (µ∗, Σ∗)∥, the remaining term in D(G, G∗) is stronger than that of W_2^2(G, G∗). Moreover, when λ = λ∗, we further obtain that

D(G, G∗) / W_2^2(G, G∗) ≍ (∥(∆µ, ∆Σ)∥ + ∥(∆µ∗, ∆Σ∗)∥) / ∥(µ, Σ) − (µ∗, Σ∗)∥.

Hence, as long as the right-hand side of the above display goes to ∞, i.e., ∥(∆µ + ∆µ∗, ∆Σ + ∆Σ∗)∥ → 0, we will have D(G, G∗)/W_2^2(G, G∗) → ∞.
This strong refinement of the Wasserstein metric is possible due to the special structure of G and G∗, as one of their components is always fixed to be (µ0, Σ0).

(iv) Under the setting when G∗ is varied, d1 = 1, and d2 = 0, by means of Fatou's lemma the result from Theorem 4.6 in [14] yields the bound

V(pG, pG∗) ≥ C′ · W_3^3(G, G∗),  (8)

if the kernel density function f is 4-strongly identifiable (cf. Definition 2.2 in [14]) and satisfies the uniform Lipschitz condition up to the fourth order, where C′ is some positive constant depending only on G and G∗. Since D(G, G∗) ≳ W_2^2(G, G∗) ≥ W_1^2(G, G∗) ≳ W_3^3(G, G∗), the bound in Theorem 3 is much tighter than that in equation (8). The loss of efficiency in equation (8) is again due to the special structures of G and G∗, as one of their components is always fixed to be (µ0, Σ0).

(v) When G∗ is fixed such that (µ∗, Σ∗) ≠ (µ0, Σ0), we can verify that C1(G∗)K(G, G∗) ≤ V(pG, pG∗) ≤ C2(G∗)K(G, G∗), where C1(G∗) and C2(G∗) are some positive constants depending only on G∗, Θ, Ω, and (µ0, Σ0). Since D(G, G∗) ≲ K(G, G∗), the weaker bound on V(pG, pG∗) in Theorem 3 can be interpreted as a compensation for the variation of G∗ around (0, (µ0, Σ0)) within the space (0, 1) × (Θ × Ω \ {(µ0, Σ0)}).

Unlike the convergence rate results under the first-order distinguishable setting of f and h0 in Theorem 2, the convergence rate of estimating λ∗ under the setting of Theorem 3 will depend on the rate of convergence of ∥(∆µ∗, ∆Σ∗)∥² to 0 (cf. Theorem 6). Additionally, the convergence rate of estimating (µ∗, Σ∗) will be determined by the convergence rates of λ∗ and (∆µ∗, ∆Σ∗) to 0.
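The lower bound of Theorem 3 can also be probed numerically in the simplest strongly identifiable case. The sketch below uses a univariate, location-only Gaussian family (f(·|µ) = N(µ, 1), h0 = N(µ0, 1)) with parameter values chosen arbitrarily for illustration; it evaluates V(pG, pG∗) by quadrature and the corresponding restriction of D(G, G∗), and the printed ratios only suggest, for these particular pairs, that V/D stays bounded away from zero. This is an editorial illustration, not part of the paper's argument.

```python
import numpy as np
from scipy.stats import norm
from scipy.integrate import trapezoid

mu0 = 0.0
x = np.linspace(-15, 15, 100001)

def p(lam, mu):
    # Univariate deviated density with h0 = N(mu0, 1) and f(.|mu) = N(mu, 1).
    return (1 - lam) * norm.pdf(x, mu0, 1.0) + lam * norm.pdf(x, mu, 1.0)

def V(G, Gs):
    return 0.5 * trapezoid(np.abs(p(*G) - p(*Gs)), x)

def D(G, Gs):
    # Restriction of D(G, G*) to the location-only case (Delta-Sigma terms vanish).
    (lam, mu), (lams, mus) = G, Gs
    dm, dms = mu - mu0, mus - mu0
    return (lam * dm**2 + lams * dms**2
            - min(lam, lams) * (dm**2 + dms**2)
            + (lam * abs(dm) + lams * abs(dms)) * abs(mu - mus))

for G, Gs in [((0.3, 1.0), (0.2, 1.5)),
              ((0.05, 0.4), (0.02, 0.6)),
              ((0.5, 2.0), (0.5, 1.9))]:
    print(V(G, Gs) / D(G, Gs))
```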
3.2.2 Weakly Identifiable Settings

Thus far, as h0 belongs to the family f, our results regarding the lower bounds between pG and pG∗ under the Total Variation distance rely on the assumption that the kernel f is strongly identifiable in the second order. However, there are various families of density functions that do not satisfy such an assumption; we refer to this as the weakly identifiable condition. To illustrate the non-uniform nature of V(pG, pG∗) under the weakly identifiable condition of f, we specifically consider a popular setting of f in this section: the multivariate location-covariance Gaussian kernel.

Location-covariance multivariate Gaussian kernel: As indicated in previous work in the literature [5, 19, 16], if f is a family of multivariate location-covariance Gaussian distributions in d dimensions, it exhibits the following partial differential equation (PDE) with respect to the location and covariance parameters:

∂²f/∂µ²(x|µ, Σ) = 2 ∂f/∂Σ(x|µ, Σ),  (9)

for any x ∈ R^d and (µ, Σ) ∈ Θ × Ω. We can verify that this structure leads to the loss of the second-order strong identifiability condition of the Gaussian kernel. Note that the PDE structure of the Gaussian kernel has been shown to lead to very slow convergence rates of parameter estimation under general over-fitted Gaussian mixture models (cf. Theorem 1.1 in [16]). For the setting of the matrix-variate deviated model, since the parameters λ∗ and (µ∗, Σ∗) are allowed to vary with the sample size, we may expect that the estimation of these parameters will also suffer from very slow rates. In fact, we achieve the following lower bound on V(pG, pG∗) under the multivariate location-covariance Gaussian kernel.
Theorem 4. Assume that h0(x) = f(x|µ0, Σ0) for some (µ0, Σ0) ∈ Θ × Ω and that f is a family of multivariate location-covariance Gaussian distributions. We denote

Q(G, G∗) := λ(∥∆µ∥⁴ + ∥∆Σ∥²) + λ∗(∥∆µ∗∥⁴ + ∥∆Σ∗∥²) − min{λ, λ∗}(∥∆µ∥⁴ + ∥∆Σ∥² + ∥∆µ∗∥⁴ + ∥∆Σ∗∥²) + (λ(∥∆µ∥² + ∥∆Σ∥) + λ∗(∥∆µ∗∥² + ∥∆Σ∗∥)) × (∥µ − µ∗∥² + ∥Σ − Σ∗∥),

for any G and G∗. Then, we can find a positive constant C depending only on Θ, Ω, and (µ0, Σ0) such that V(pG, pG∗) ≥ C · Q(G, G∗) for any G and G∗.

See Appendix A.3 for the proof of Theorem 4. A few comments on Theorem 4 are in order.

(i) Different from the formulation of D(G, G∗) in Theorem 3, where µ and Σ enter with the same power, there is a mismatch of powers between ∥∆µ∥², ∥∆µ∗∥² and ∥∆Σ∥, ∥∆Σ∗∥ in the formulation of Q(G, G∗). This interesting phenomenon is mainly due to the structure of the partial differential equation (9), where the second-order derivative with respect to the location parameter and the first-order derivative with respect to the covariance parameter are linearly dependent.

(ii) If we denote

Q′(G, G∗) := λ(∥∆µ∥⁴ + ∥∆Σ∥²) + λ∗(∥∆µ∗∥⁴ + ∥∆Σ∗∥²) − min{λ, λ∗}(∥∆µ∥⁴ + ∥∆Σ∥² + ∥∆µ∗∥⁴ + ∥∆Σ∗∥²) + min{λ, λ∗}(∥µ − µ∗∥⁴ + ∥Σ − Σ∗∥²),

then we can verify that Q(G, G∗) ≳ Q′(G, G∗) for any G, G∗. If we treat G and G∗ as two-component measures as in remark (iii) after Theorem 3, we would have

Q′(G, G∗) ≍ W_4^4(G1, G1,∗) + W_2^2(G2, G2,∗),  (10)

where G1 = (1 − λ)δ_{µ′0} + λδ_µ, G2 = (1 − λ)δ_{Σ′0} + λδ_Σ, and similarly for G1,∗ and G2,∗. Here, (µ′0, Σ′0) = (µ0, Σ0), and W2, W4 are respectively the second- and fourth-order Wasserstein metrics. The formulation of Q′(G, G∗), therefore, can be thought of as a combination of two Wasserstein metrics: one involving only the parameter µ and another involving only the parameter Σ.
The division into two Wasserstein metrics can be traced back again to the PDE structure in equation (9). If λ = λ∗ and (∥∆µ∥² + ∥∆µ∗∥² + ∥∆Σ∥ + ∥∆Σ∗∥)/(∥µ − µ∗∥² + ∥Σ − Σ∗∥) → ∞, we will have Q(G, G∗)/Q′(G, G∗) → ∞. This proves that the result of Theorem 4 under the multivariate setting of the Gaussian kernel is a strong refinement of the summation of Wasserstein metrics for the location and covariance parameters in equation (10).

(iii) Similar to the comments after Theorem 3, when G∗ is fixed and (µ∗, Σ∗) ≠ (µ0, Σ0), we can also verify that C1(G∗)K(G, G∗) ≤ V(pG, pG∗) ≤ C2(G∗)K(G, G∗), where C1(G∗) and C2(G∗) are some positive constants depending only on G∗, Θ, Ω, and (µ0, Σ0). When G∗ is varied, as long as G∗ does not converge to (0, (µ0, Σ0)), we will still have inf_{G∗} C(G∗) > 0, i.e., the metric K is still sufficient to capture the variation of pG around pG∗ under the L2 norm. However, when G∗ indeed converges to (0, (µ0, Σ0)), our result in Theorem 4 implies that a much stronger compensation of efficiency is needed to capture the variation of pG around pG∗ under the Total Variation distance.

A consequence of Theorem 4 is that the convergence rate of estimating λ∗ is now determined by ∥∆µ∗∥⁴ + ∥∆Σ∗∥², instead of ∥(∆µ∗, ∆Σ∗)∥² as in the setting where f is strongly identifiable in the second order. Furthermore, we also encounter the phenomenon that the rate of convergence of estimating Σ∗ is much faster than that of estimating µ∗. In particular, estimating Σ∗ depends on the rate at which λ∗(∥µ∗∥² + ∥Σ∗∥) converges to 0, while estimating µ∗ relies on the square root of this rate (cf. Theorem 7).

4 Minimax Lower Bounds and Convergence Rates of Parameter Estimation

In this section, we study the convergence rates of the MLE Ĝn as well as minimax lower bounds for estimating G∗ under various settings of h0 and f.
Firstly, we start with the distinguishable regime of h0 and f.

Theorem 5 (Distinguishable settings). Assume that the classes of densities h0 and f satisfy the conditions in Theorem 2. Then, we achieve the following:

(a) (Minimax lower bound) Assume that f satisfies the following assumption S.1:

(S.1) sup_{∥(µ,Σ)−(µ′,Σ′)∥≤c0} ∫ [∂^{|α|}f(x|µ, Σ) / (∂µ^{α1} ∂Σ^{α2})]² / f(x|µ′, Σ′) dx < ∞

for some sufficiently small c0 > 0, where α1 ∈ N^{d1}, α2 ∈ N^{d2} in the partial derivative of f take any combination such that |α| = |α1| + |α2| ≤ 1. Then for any r < 1, there exist two universal positive constants c1 and c2 such that

inf_{Ĝn∈Ξ} sup_{G∈Ξ} EpG[λ²∥(µ̂n, Σ̂n) − (µ, Σ)∥²] ≥ c1 n^{−1/r},
inf_{Ĝn∈Ξ} sup_{G∈Ξ} EpG[|λ̂n − λ|²] ≥ c2 n^{−1/r}.

Here, the infimum is taken over all sequences of estimates Ĝn = (λ̂n, µ̂n, Σ̂n).

(b) (MLE rate) Let Ĝn be the MLE defined in equation (3), and assume the family {pG : G ∈ Ξ} satisfies condition A2. Then, we have the following convergence rate for the MLE:

sup_{G∗∈Ξ} EpG∗[(λ∗)²∥(µ̂n, Σ̂n) − (µ∗, Σ∗)∥²] ≲ log²n / n,
sup_{G∗∈Ξ} EpG∗[|λ̂n − λ∗|²] ≲ log²n / n.

The proof of Theorem 5 is in Appendix B.1. The results of Theorem 5 imply that even though we can still estimate λ∗ at the standard rate n^{−1/2}, the convergence rate of (µ̂n, Σ̂n) to (µ∗, Σ∗) strictly depends on the vanishing rate of λ∗ to 0. Therefore, the convergence rate of estimating (µ∗, Σ∗) can in general be slower than n^{−1/2}, as long as λ∗ goes to 0 at a rate slower than n^{−1/2}.
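To make the MLE rate in part (b) concrete, the following is a minimal Monte Carlo sketch in a univariate surrogate of the model, with h0 a standard normal and f a Gaussian location-scale kernel; the values λ∗ = 0.3, µ∗ = 2, σ∗ = 1.5, the log-parameterization of σ, and the box bounds are assumptions made purely for illustration. It fits Ĝn by numerical likelihood maximization of (3) and prints the estimation errors as n grows, which should shrink at roughly the n^{−1/2} order suggested by Theorem 5 when λ∗ is held fixed; it is a sanity-check simulation, not a verification of the theorem.

```python
import numpy as np
from scipy.optimize import minimize
from scipy.stats import norm

rng = np.random.default_rng(0)
LAM, MU, SIGMA = 0.3, 2.0, 1.5          # assumed true G* = (lambda*, mu*, sigma*)

def neg_loglik(theta, x):
    lam, mu, log_sigma = theta
    sigma = np.exp(log_sigma)           # keep sigma positive
    dens = (1 - lam) * norm.pdf(x, 0.0, 1.0) + lam * norm.pdf(x, mu, sigma)
    return -np.sum(np.log(dens + 1e-300))

def mle(x):
    res = minimize(neg_loglik, x0=[0.5, 1.0, 0.0], args=(x,),
                   method="L-BFGS-B",
                   bounds=[(1e-3, 1 - 1e-3), (-10, 10), (-3, 3)])
    lam, mu, log_sigma = res.x
    return lam, mu, np.exp(log_sigma)

for n in (500, 2000, 8000, 32000):
    comp = rng.random(n) < LAM          # which component each sample comes from
    x = np.where(comp, rng.normal(MU, SIGMA, n), rng.normal(0.0, 1.0, n))
    lam_hat, mu_hat, sigma_hat = mle(x)
    # |lam_hat - lambda*| should shrink roughly like n^{-1/2} in this toy setting.
    print(n, abs(lam_hat - LAM), abs(mu_hat - MU), abs(sigma_hat - SIGMA))
```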
Our next result investigates the behavior of Ĝn in the case where h0 is identical to f and f is strongly identifiable up to the second order.

Theorem 6. (Strongly identifiable and non-distinguishable settings) Assume that the classes of densities h0 and f satisfy the conditions in Theorem 3. For any sequence {ln}, we define
\[
\Xi_1(l_n) := \left\{ G = (\lambda, \mu, \Sigma) \in \Xi \,:\, \frac{l_n}{\min\limits_{1 \le i \le d_1,\, 1 \le u, v \le d_2}\{|(\Delta\mu)_i|^{2}, |(\Delta\Sigma)_{uv}|^{2}\}\,\sqrt{n}} \le \lambda \right\}.
\]
Then, we achieve the following:

(a) (Minimax lower bound) Assume that f satisfies assumption S.1 in Theorem 5. Then for any r < 1 and any sequence {ln}, there exist two universal positive constants c1 and c2 such that
\[
\inf_{\widehat{G}_n \in \Xi}\, \sup_{G \in \Xi_1(l_n)} \mathbb{E}_{p_G}\bigl[\lambda^{2}\|(\Delta\mu, \Delta\Sigma)\|^{2}\,\|(\widehat{\mu}_n, \widehat{\Sigma}_n) - (\mu, \Sigma)\|^{2}\bigr] \ge c_1 n^{-1/r},
\qquad
\inf_{\widehat{G}_n \in \Xi}\, \sup_{G \in \Xi_1(l_n)} \mathbb{E}_{p_G}\bigl[\|(\Delta\mu, \Delta\Sigma)\|^{4}\,|\widehat{\lambda}_n - \lambda|^{2}\bigr] \ge c_2 n^{-1/r}.
\]

(b) (MLE rate) Let Ĝn be the MLE defined in equation (3), and assume the family {pG : G ∈ Ξ} satisfies condition A2. Then, for any sequence {ln} such that ln/log n → ∞,
\[
\sup_{G^* \in \Xi_1(l_n)} \mathbb{E}_{p_{G^*}}\bigl[(\lambda^*)^{2}\|(\Delta\mu^*, \Delta\Sigma^*)\|^{2}\,\|(\widehat{\mu}_n, \widehat{\Sigma}_n) - (\mu^*, \Sigma^*)\|^{2}\bigr] \lesssim \frac{\log^{2} n}{n},
\qquad
\sup_{G^* \in \Xi_1(l_n)} \mathbb{E}_{p_{G^*}}\bigl[\|(\Delta\mu^*, \Delta\Sigma^*)\|^{4}\,|\widehat{\lambda}_n - \lambda^*|^{2}\bigr] \lesssim \frac{\log^{2} n}{n}.
\]

The proof of Theorem 6 is in Appendix B.2. The results of part (b) generalize those of Theorems 3.1 and 3.2 in [12] to the setting of kernels that are strongly identifiable in the second order. The condition on the lower bound of λ in the formation of Ξ1(ln) is necessary to guarantee that (µ̂n, Σ̂n) and λ̂n are consistent estimators of (µ∗, Σ∗) and λ∗, respectively.
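To see, illustratively, why the lower-bound condition defining Ξ1(ln) is the natural one here, write $m^* := \min_{1 \le i \le d_1,\, 1 \le u, v \le d_2}\{|(\Delta\mu^*)_i|^{2}, |(\Delta\Sigma^*)_{uv}|^{2}\}$. For G∗ ∈ Ξ1(ln) we have λ∗ ≥ ln/(√n m∗), hence
\[
\frac{\log n}{\sqrt{n}\,\lambda^* m^*} \le \frac{\log n}{l_n} \longrightarrow 0
\]
whenever ln/log n → ∞, and this is exactly the quantity that must vanish in the coordinatewise bounds derived next.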
In particular, from the results in equation (26) of the proof of Theorem 6, we have for any G∗ ∈ Ξ that
\[
\mathbb{E}_{p_{G^*}}\bigl[(\lambda^*)^{2}\|(\Delta\mu^*, \Delta\Sigma^*)\|^{2}\,\|(\widehat{\mu}_n, \widehat{\Sigma}_n) - (\mu^*, \Sigma^*)\|^{2}\bigr] \lesssim \frac{\log^{2} n}{n}.
\]
Therefore, for any 1 ≤ i ≤ d1 and 1 ≤ u, v ≤ d2 we get
\[
\mathbb{E}_{p_{G^*}}\left[\left(\frac{(\Delta\widehat{\mu}_n)_i}{(\Delta\mu^*)_i} - 1\right)^{2}\right] \lesssim \frac{\log^{2} n}{n\,(\lambda^*)^{2}\,\{(\Delta\mu^*)_i\}^{4}},
\qquad
\mathbb{E}_{p_{G^*}}\left[\left(\frac{(\Delta\widehat{\Sigma}_n)_{uv}}{(\Delta\Sigma^*)_{uv}} - 1\right)^{2}\right] \lesssim \frac{\log^{2} n}{n\,(\lambda^*)^{2}\,\{(\Delta\Sigma^*)_{uv}\}^{4}}.
\]
It indicates that
\[
\frac{\log n}{\sqrt{n}\,\lambda^*\, \min\limits_{1 \le i \le d_1,\, 1 \le u, v \le d_2}\{|(\Delta\mu^*)_i|^{2}, |(\Delta\Sigma^*)_{uv}|^{2}\}} \longrightarrow 0
\]
is required for the left-hand-side terms of the above display to go to 0 for all 1 ≤ i ≤ d1 and 1 ≤ u, v ≤ d2.

The results of Theorem 6 imply that, as long as the kernel functions are strongly identifiable in the second order, the convergence rates of µ̂n to µ∗ and of Σ̂n to Σ∗ are similar: both depend on the vanishing rate of (λ∗)²∥(∆µ∗, ∆Σ∗)∥² to 0. In our next result, on the location-covariance multivariate Gaussian distribution, we demonstrate that such uniform convergence rates across the different parameters no longer hold.

Theorem 7. (Weakly identifiable and non-distinguishable settings) Assume that f is a family of location-covariance multivariate Gaussian distributions and that h0(x) = f(x|µ0, Σ0) for some (µ0, Σ0) ∈ Θ × Ω. For any sequence {ln}, we define
\[
\Xi_2(l_n) := \left\{ G = (\lambda, \mu, \Sigma) \in \Xi \,:\, \frac{l_n}{\min\limits_{1 \le i \le d,\, 1 \le u, v \le d}\{|(\Delta\mu)_i|^{4}, |(\Delta\Sigma)_{uv}|^{2}\}\,\sqrt{n}} \le \lambda \right\}.
\]
Then, the following holds:

(a) (Minimax lower bound) For any r < 1 and any sequence {ln}, there exist two universal positive constants c1 and c2 such that
\[
\inf_{\widehat{G}_n \in \Xi}\, \sup_{G \in \Xi_2(l_n)} \mathbb{E}_{p_G}\bigl[\lambda^{2}\bigl(\|\Delta\mu\|^{4} + \|\Delta\Sigma\|^{2}\bigr)\bigl(\|\widehat{\mu}_n - \mu\|^{4} + \|\widehat{\Sigma}_n - \Sigma\|^{2}\bigr)\bigr] \ge c_1 n^{-1/r},
\qquad
\inf_{\widehat{G}_n \in \Xi}\, \sup_{G \in \Xi_2(l_n)} \mathbb{E}_{p_G}\bigl[\bigl(\|\Delta\mu\|^{8} + \|\Delta\Sigma\|^{4}\bigr)\,|\widehat{\lambda}_n - \lambda|^{2}\bigr] \ge c_2 n^{-1/r}.
\]

(b) (MLE rate) Let Ĝn be the estimator defined in (3). Then, for any sequence {ln} such that ln/log n → ∞, the following holds:
\[
\sup_{G^* \in \Xi_2(l_n)} \mathbb{E}_{p_{G^*}}\bigl[(\lambda^*)^{2}\bigl(\|\Delta\mu^*\|^{4} + \|\Delta\Sigma^*\|^{2}\bigr)\bigl(\|\widehat{\mu}_n - \mu^*\|^{4} + \|\widehat{\Sigma}_n - \Sigma^*\|^{2}\bigr)\bigr] \lesssim \frac{\log^{2} n}{n},
\qquad
\sup_{G^* \in \Xi_2(l_n)} \mathbb{E}_{p_{G^*}}\bigl[\bigl(\|\Delta\mu^*\|^{8} + \|\Delta\Sigma^*\|^{4}\bigr)\,|\widehat{\lambda}_n - \lambda^*|^{2}\bigr] \lesssim \frac{\log^{2} n}{n}.
\]

The proof of Theorem 7 is in Appendix B.3.
A few comments are in order:

(i) Similar to the argument after Theorem 6, the condition on λ in the formulation of Ξ2(ln) guarantees that (µ̂n, Σ̂n) and λ̂n are consistent estimators of (µ∗, Σ∗) and λ∗, respectively.

(ii) The results of part (b) indicate that the convergence rate for estimating Σ∗ is generally much faster than that for estimating µ∗, regardless of the behavior of (λ∗)²(∥∆µ∗∥⁴ + ∥∆Σ∗∥²). The non-uniformity of these convergence rates is mainly due to the structure of the partial differential equation in (9), in which the second-order derivative with respect to the location parameter and the first-order derivative with respect to the covariance parameter are coupled.

(iii) From the results of part (b), it is clear that when ∥∆µ∗∥ + ∥∆Σ∗∥ ̸→ 0, i.e., (µ∗, Σ∗) → (µ, Σ) ≠ (µ0, Σ0), and λ∗ ̸→ 0, the convergence rate of λ̂n to λ∗ is n^{-1/2}. Furthermore, by using the result from part (a) of Proposition 4 we can verify that
\[
\sup_{G^*} \mathbb{E}_{p_{G^*}}\bigl[(\lambda^*)^{2}\bigl(\|\widehat{\mu}_n - \mu^*\|^{2} + \|\widehat{\Sigma}_n - \Sigma^*\|^{2}\bigr)\bigr] \lesssim \frac{\log^{2} n}{n},
\]
where the supremum is taken over {G∗ ∈ Ξ2(ln) : K(G∗, G) ≤ ǫ}, with G = (λ, µ, Σ), λ∗ → λ, and ǫ a sufficiently small positive constant. Since λ ≠ 0, we achieve the optimal convergence rate n^{-1/2} for estimating (µ∗, Σ∗) within a sufficiently small neighborhood of G under the metric K. These results imply that even though the convergence rate for estimating G∗ may be extremely slow when G∗ ranges over the whole space Ξ2(ln) (global convergence), the rate can be the standard n^{-1/2} when G∗ ranges over a sufficiently small neighborhood of an appropriate parameter G (local convergence).

As we have seen from the convergence rate results for location-covariance multivariate Gaussian distributions, the PDE structure (9) plays a key role both in the slow convergence rates of the location and covariance parameters and in the mismatch between the orders of these rates.
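As we read it, the PDE structure invoked in (ii) and throughout this section is the classical heat-equation-type identity linking the location and covariance derivatives of the Gaussian kernel; in the univariate case with variance v it states ∂f/∂v = (1/2) ∂²f/∂µ² for f(x|µ, v) the N(µ, v) density. The snippet below verifies this identity symbolically. Treating it as the content of equation (9) is an assumption made here only for illustration, since (9) itself appears earlier in the paper.

```python
# Symbolic sanity check (univariate case) of the Gaussian identity that we believe
# underlies the PDE structure referred to as equation (9):
#     d f / d v  ==  (1/2) * d^2 f / d mu^2   for the N(mu, v) density.
import sympy as sp

x, mu = sp.symbols("x mu", real=True)
v = sp.symbols("v", positive=True)  # variance parameter

f = sp.exp(-(x - mu) ** 2 / (2 * v)) / sp.sqrt(2 * sp.pi * v)  # N(mu, v) density

residual = sp.simplify(sp.diff(f, v) - sp.Rational(1, 2) * sp.diff(f, mu, 2))
print(residual)  # prints 0, confirming the identity
```

Because a perturbation of the covariance can thus be exchanged for a second-order perturbation of the location, first- and second-order Taylor terms of pG interact, which is consistent with the asymmetry between the rates for µ∗ and Σ∗ described in comment (ii).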
5 Conclusion

In this paper, we establish the rate for estimating the true parameters in the matrix-covariate deviated model (2) by using the MLE method. During our derivation, we have to overcome two major obstacles: first, the interaction between the null-hypothesis density h0 and the alternative density function f; and second, the possibility that the deviated proportion λ∗ vanishes to either endpoint of the interval [0, 1]. To this end, we introduce a notion of distinguishability to control the linear independence between h0 and f, and we ultimately achieve the optimal convergence rate of the MLE under both the distinguishable and the non-distinguishable settings.

Acknowledgements

Nhat Ho acknowledges support from the NSF IFML 2019844 and the NSF AI Institute for Foundations of Machine Learning.

References

[1] T. Cai, X. J. Jeng, and J. Jin. Optimal detection of heterogeneous and heteroscedastic mixtures. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 73(5):629–662, 2011. (Cited on page 3.)

[2] T. Cai, J. Jin, and M. G. Low. Estimation and confidence sets for sparse normal mixtures. Annals of Statistics, 35(6):2421–2449, 2007. (Cited on page 3.)
[3] T. Cai and Y. Wu. Optimal detection of sparse mixtures against a given null distribution. IEEE Transactions on Information Theory, 60(4):2217–2232, 2014. (Cited on page 3.)

[4] G. Casella and R. L. Berger. Statistical Inference. Cengage Learning, 2021. (Cited on page 1.)

[5] H. Chen and J. Chen. Tests for homogeneity in normal mixtures in the presence of a structural parameter. Statistica Sinica, 13:351–365, 2003. (Cited on page 9.)
[6] H. Chen, J. Chen, and J. D. Kalbfleisch. A modified likelihood ratio test for homogeneity in finite mixture models. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 63(1):19–29, 2001. (Cited on page 3.)

[7] J. Chen, P. Li, and Y. Fu. Inference on the order of a normal mixture. Journal of the American Statistical Association, 107:1096–1105, 2012. (Cited on page 3.)

[8] J. H. Chen. Optimal rate of convergence for finite mixture models. Annals of Statistics, 23(1):221–233, 1995. (Cited on pages 2 and 17.)
[9] W. G. Cochran. The χ² test of goodness of fit. The Annals of Mathematical Statistics, pages 315–345, 1952. (Cited on page 1.)

[10] N. Deb, S. Saha, A. Guntuboyina, and B. Sen. Two-component mixture model in the presence of covariates. Journal of the American Statistical Association, 117(540):1820–1834, 2022. (Cited on page 3.)

[11] D. Donoho and J. Jin. Higher criticism for detecting sparse heterogeneous mixtures. Annals of Statistics, 32(3):962–994, 2004. (Cited on page 3.)
[12] S. Gadat, J. Kahn, C. Marteau, and C. Maugis-Rabusseau. Parameter recovery in two-component contamination mixtures: The L2 strategy. Annales de l'Institut Henri Poincaré, Probabilités et Statistiques, 56:1391–1418, 2020. (Cited on pages 3, 8, 12, 34, 35, and 37.)

[13] E. Giné and R. Nickl. Mathematical Foundations of Infinite-Dimensional Statistical Models. Cambridge University Press, 2021. (Cited on page 4.)

[14] P. Heinrich and J. Kahn. Strong identifiability and optimal minimax rates for finite mixture estimation. Annals of Statistics, 46(6A):2844–2870, 2018. (Cited on pages 2, 8, and 17.)
[15] N. Ho and L. Nguyen. Singularity structures and impacts on parameter estimation in finite mixtures of distributions. SIAM Journal on Mathematics of Data Science, 1(4):730–758, 2019. (Cited on page 2.)

[16] N. Ho and X. Nguyen. Convergence rates of parameter estimation for some weakly identifiable finite mixtures. Annals of Statistics, 44:2726–2755, 2016. (Cited on pages 2, 9, and 25.)

[17] N. Ho and X. Nguyen. On strong identifiability and convergence rates of parameter estimation in finite mixtures. Electronic Journal of Statistics, 10:271–307, 2016. (Cited on pages 2 and 17.)

[18] H. Kasahara and K. Shimotsu. Non-parametric identification and estimation of the number of components in multivariate mixtures. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 76(1):97–111, 2014. (Cited on page 3.)
[19] H. Kasahara and K. Shimotsu. Testing the number of components in normal mixture regression models. Journal of the American Statistical Association, 2014. (Cited on page 9.)

[20] H. Kasahara and K. Shimotsu. Testing the number of components in normal mixture regression models. Journal of the American Statistical Association, 110(512):1632–1645, 2015. (Cited on page 3.)

[21] P. Li and J. Chen. Testing the order of a finite mixture. Journal of the American Statistical Association, 105(491):1084–1092, 2010. (Cited on page 3.)
[22] Q. Liu, J. Lee, and M. Jordan. A kernelized Stein discrepancy for goodness-of-fit tests. In Proceedings of the 33rd International Conference on Machine Learning, volume 48 of Proceedings of Machine Learning Research, pages 276–284. PMLR, 20–22 Jun 2016. (Cited on page 1.)

[23] T. Manole and N. Ho. Refined convergence rates for maximum likelihood estimation under finite mixture models. In Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pages 14979–15006. PMLR, 17–23 Jul 2022. (Cited on page 2.)

[24] X. Nguyen. Convergence of latent mixing measures in finite and infinite mixture models. Annals of Statistics, 41(1):370–400, 2013. (Cited on pages 2 and 17.)
[25] R. Patra and B. Sen. Estimation of a two-component mixture model with applications to multiple testing. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 78(4):869–893, 2016. (Cited on page 3.)

[26] A. Schrab, B. Guedj, and A. Gretton. KSD aggregated goodness-of-fit test. In A. H. Oh, A. Agarwal, D. Belgrave, and K. Cho, editors, Advances in Neural Information Processing Systems, 2022. (Cited on page 1.)

[27] S. Talts, M. Betancourt, D. Simpson, A. Vehtari, and A. Gelman. Validating Bayesian inference algorithms with simulation-based calibration, 2018. (Cited on page 1.)
[28] S. van de Geer. Empirical Processes in M-Estimation, volume 6. Cambridge University Press, 2000. (Cited on pages 4 and 5.)

[29] N. Verzelen and E. Arias-Castro. Detection and feature selection in sparse mixture models. Annals of Statistics, 45(5):1920–1950, 2017. (Cited on page 3.)

[30] C. Villani. Topics in Optimal Transportation. American Mathematical Society, 2003. (Cited on page 8.)
[31] J. Yang, V. Rao, and J. Neville. A Stein-Papangelou goodness-of-fit test for point processes. In Proceedings of the Twenty-Second International Conference on Artificial Intelligence and Statistics, volume 89 of Proceedings of Machine Learning Research, pages 226–235. PMLR, 16–18 Apr 2019. (Cited on page 1.)

Supplement to "Optimal Rate for Parameter Estimation in Matrix-variate Deviated Models"

In this supplementary material, we present the proofs for the convergence rates of densities in Appendix A, while those for the minimax lower bounds and the convergence rates of parameter estimation are given in Appendix B. Finally, we provide a lemma needed for these results, along with its proof, in Appendix C.

A Proofs for Convergence Rates of Densities

In this appendix, we provide proofs for the key results on the convergence rates of densities presented in Section 3.

A.1 Proof of Theorem 2

The second inequality in Theorem 2 follows directly from the equivalent form of W1(G, G∗) in Lemma 1 (see Appendix C). Therefore, we only focus on establishing the first inequality of the theorem. We start with the following key result.

Proposition 2. Given the assumptions in Theorem 2 and Ḡ = (λ̄, µ̄, Σ̄) such that λ̄ ∈ [0, 1] and (µ̄, Σ̄) can be equal to (µ0, Σ0), we have
\[
\lim_{\epsilon \to 0}\; \inf_{G, G^*}\left\{ \frac{V(p_G, p_{G^*})}{K(G, G^*)} \,:\, K(G, \bar{G}) \vee K(G^*, \bar{G}) \le \epsilon \right\} > 0.
\]

Proof. The high-level idea of the proof of Proposition 2 is to utilize the Taylor expansion techniques previously employed in [8, 24, 17, 14]. Indeed, following the Fatou argument of Theorem 3.1 in [17],
to obtain the conclusion of Proposition 2 it suffices to demonstrate that
\[
\lim_{\epsilon \to 0}\; \inf_{G, G^*}\left\{ \frac{\|p_G - p_{G^*}\|_{\infty}}{K(G, G^*)} \,:\, K(G, \bar{G}) \vee K(G^*, \bar{G}) \le \epsilon \right\} > 0.
\]
Assume that the above conclusion does not hold. Then we can find two sequences Gn = (λn, µn, Σn) and G∗,n = (λ∗n, µ∗n, Σ∗n) such that K(Gn, Ḡ) → 0, K(G∗,n, Ḡ) → 0, and ∥pGn − pG∗,n∥∞/K(Gn, G∗,n) → 0 as n → ∞. We only consider the most challenging setting for (µn, Σn) and (µ∗n, Σ∗n), namely when they share the same limit point (µ′, Σ′); the other settings of these two components can be argued in the same fashion. Here, (µ′, Σ′) is not necessarily equal to (µ0, Σ0) or (µ̄, Σ̄), as λn and λ∗n can go to 0 or 1 in the limit. Under that setting, by means of a Taylor expansion up to the first order we obtain
\[
\begin{aligned}
\frac{p_{G_n}(x) - p_{G_{*,n}}(x)}{K(G_n, G_{*,n})}
&= \frac{(\lambda^*_n - \lambda_n)\bigl[h_0(x|\mu_0, \Sigma_0) - f(x|\mu^*_n, \Sigma^*_n)\bigr] + \lambda_n\bigl[f(x|\mu_n, \Sigma_n) - f(x|\mu^*_n, \Sigma^*_n)\bigr]}{K(G_n, G_{*,n})} \\
&= \frac{(\lambda^*_n - \lambda_n)\bigl[h_0(x|\mu_0, \Sigma_0) - f(x|\mu^*_n, \Sigma^*_n)\bigr]}{K(G_n, G_{*,n})}
 + \frac{\lambda_n\Bigl[\sum_{|\alpha| = 1} \frac{(\mu_n - \mu^*_n)^{\alpha_1}(\Sigma_n - \Sigma^*_n)^{\alpha_2}}{\alpha!}\, \frac{\partial^{|\alpha|} f}{\partial \mu^{\alpha_1}\partial \Sigma^{\alpha_2}}(x|\mu^*_n, \Sigma^*_n) + R_1(x)\Bigr]}{K(G_n, G_{*,n})},
\end{aligned}
\]
where R1(x) is the Taylor remainder and α = (α1, α2) in the summation of the second equality satisfies
\[
\alpha_1 = (\alpha^{(1)}_1, \ldots, \alpha^{(1)}_{d_1}) \in \mathbb{N}^{d_1}, \quad
\alpha_2 = (\alpha^{(2)}_{uv}) \in \mathbb{N}^{d_2 \times d_2}, \quad
|\alpha| = \sum_{i=1}^{d_1} \alpha^{(1)}_i + \sum_{1 \le u, v \le d_2} \alpha^{(2)}_{uv}, \quad
\alpha! = \prod_{i=1}^{d_1} \alpha^{(1)}_i! \prod_{1 \le u, v \le d_2} \alpha^{(2)}_{uv}!.
\]
As f admits the first-order uniform Lipschitz condition, we have R1(x) = O(∥(µn, Σn) − (µ∗n, Σ∗n)∥^{1+γ}) for some γ > 0, which implies that λn|R1(x)|/K(Gn, G∗,n) = O(∥(µn, Σn) − (µ∗n, Σ∗n)∥^{γ}) → 0 as n → ∞.
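For completeness, the first equality in the preceding display is elementary algebra once pG is written in its mixture form; assuming pG(x) = (1 − λ)h0(x) + λ f(x|µ, Σ), which is the form consistent with the expansions in this proof, we have
\[
\begin{aligned}
p_{G_n}(x) - p_{G_{*,n}}(x)
&= (1 - \lambda_n)h_0(x) + \lambda_n f(x|\mu_n, \Sigma_n) - (1 - \lambda^*_n)h_0(x) - \lambda^*_n f(x|\mu^*_n, \Sigma^*_n) \\
&= (\lambda^*_n - \lambda_n)h_0(x) + \lambda_n f(x|\mu_n, \Sigma_n) - \lambda^*_n f(x|\mu^*_n, \Sigma^*_n) \\
&= (\lambda^*_n - \lambda_n)\bigl[h_0(x) - f(x|\mu^*_n, \Sigma^*_n)\bigr] + \lambda_n\bigl[f(x|\mu_n, \Sigma_n) - f(x|\mu^*_n, \Sigma^*_n)\bigr],
\end{aligned}
\]
and the same decomposition is reused with h0 = f(·|µ0, Σ0) in the proof of Proposition 3 below.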
Therefore, we can treat [pGn(x) − pG∗,n(x)]/K(Gn, G∗,n) as a linear combination of h0(x|µ0, Σ0) and the derivatives ∂^{|α|}f/∂µ^{α1}∂Σ^{α2}(x|µ∗n, Σ∗n) with |α| ≤ 1. Assume that the coefficients of these terms all go to 0. Then, by studying the coefficients of h0(x|µ0, Σ0), ∂f/∂µi(x|µ0, Σ0), and ∂f/∂Σuv(x|µ0, Σ0), we achieve
\[
\frac{\lambda^*_n - \lambda_n}{K(G_n, G_{*,n})} \to 0, \qquad
\frac{\lambda_n(\mu_n - \mu^*_n)_i}{K(G_n, G_{*,n})} \to 0, \qquad
\frac{\lambda_n(\Sigma_n - \Sigma^*_n)_{uv}}{K(G_n, G_{*,n})} \to 0
\]
for all 1 ≤ i ≤ d1 and 1 ≤ u, v ≤ d2, where (a)i denotes the i-th element of a vector a and Auv denotes the (u, v)-th element of a matrix A. This would imply that (λn + λ∗n)∥(µn, Σn) − (µ∗n, Σ∗n)∥/K(Gn, G∗,n) → 0. Therefore, we obtain
\[
1 = \frac{|\lambda^*_n - \lambda_n| + (\lambda_n + \lambda^*_n)\|(\mu_n, \Sigma_n) - (\mu^*_n, \Sigma^*_n)\|}{K(G_n, G_{*,n})} \to 0,
\]
a contradiction. Hence, not all of the coefficients of h0(x|µ0, Σ0) and ∂^{|α|}f/∂µ^{α1}∂Σ^{α2}(x|µ∗n, Σ∗n) go to 0. If we denote by mn the maximum of the absolute values of these coefficients, then 1/mn ̸→ ∞ as n → ∞, i.e., 1/mn is uniformly bounded. Hence, we achieve for all x that
\[
\frac{1}{m_n}\, \frac{p_{G_n}(x) - p_{G_{*,n}}(x)}{K(G_n, G_{*,n})} \to \eta\, h_0(x|\mu_0, \Sigma_0) + \sum_{|\alpha| \le 1} \tau_{\alpha}\, \frac{\partial^{|\alpha|} f}{\partial \mu^{\alpha_1}\partial \Sigma^{\alpha_2}}(x|\mu', \Sigma') = 0
\]
for some coefficients η and τα that are not all 0. However, as f is distinguishable from h0 up to the first order, the above equation indicates that η = τα = 0 for all |α| ≤ 1, a contradiction. As a consequence, we achieve the conclusion of the proposition.

Now, assume that the conclusion of Theorem 2 does not hold. Then we can find two sequences G′n and G′∗,n such that An = ∥pG′n − pG′∗,n∥2/K(G′n, G′∗,n) → 0 as n → ∞.
Since Θ and Ω are two bounded subsets, we can find subsequences of G′n and G′∗,n such that K(G′n, G1) and K(G′∗,n, G2) vanish as n → ∞, where G1 and G2 are discrete measures having one component equal to (µ0, Σ0). Because An → 0, we obtain V(pG′n, pG′∗,n) → 0 as n → ∞. By means of Fatou's lemma, we have
\[
0 = \lim_{n \to \infty} \int \bigl|p_{G'_n}(x) - p_{G'_{*,n}}(x)\bigr|\, dx \ge \int \liminf_{n \to \infty} \bigl|p_{G'_n}(x) - p_{G'_{*,n}}(x)\bigr|\, dx = V(p_{G_1}, p_{G_2}).
\]
Due to the fact that f is distinguishable from h0 up to the first order, the above equation implies that G1 ≡ G2. However, from the result of Proposition 2, regardless of the value of G1 we would have An ̸→ 0 as n → ∞, which is a contradiction. Therefore, we obtain the conclusion of the theorem.

A.2 Proof of Theorem 3

Utilizing the same Fatou argument as in Proposition 2, to achieve the conclusion of the first inequality in Theorem 3 it suffices to demonstrate the following result.

Proposition 3. Given the assumptions in Theorem 3 and Ḡ = (λ̄, µ̄, Σ̄) such that λ̄ ∈ [0, 1] and (µ̄, Σ̄) can be identical to (µ0, Σ0), the following holds:

(a) If (µ0, Σ0) ≠ (µ̄, Σ̄) and λ̄ > 0, then
\[
\lim_{\epsilon \to 0}\; \inf_{G, G^*}\left\{ \frac{\|p_G - p_{G^*}\|_{\infty}}{K(G, G^*)} \,:\, K(G, \bar{G}) \vee K(G^*, \bar{G}) \le \epsilon \right\} > 0.
\]

(b) If (µ0, Σ0) ≡ (µ̄, Σ̄), or if (µ0, Σ0) ≠ (µ̄, Σ̄) and λ̄ = 0, then
\[
\lim_{\epsilon \to 0}\; \inf_{G, G^*}\left\{ \frac{\|p_G - p_{G^*}\|_{\infty}}{D(G, G^*)} \,:\, D(G, \bar{G}) \vee D(G^*, \bar{G}) \le \epsilon \right\} > 0.
\]

Proof. The proof of part (a) is essentially similar to that of Proposition 2; therefore, we only provide the proof for the challenging settings of part (b).
Here, we only consider the setting $(\mu_0, \Sigma_0) \equiv (\bar\mu, \bar\Sigma)$, as the proof for the other possibilities of $(\mu_0,\Sigma_0)$ can be argued in a similar fashion. For transparency of the argument, we assume that $T$ is the identity mapping. Under this assumption, $(\mu_0,\Sigma_0) = (\theta_0,\Sigma_0)$, $\overline{G} = (\bar\lambda, \theta_0, \Sigma_0)$, and $h_0(x|\theta_0,\Sigma_0) = f(x|\theta_0,\Sigma_0)$ for all $x \in \mathcal{X}$.

Assume that the conclusion of Proposition 3 does not hold. Then we can find two sequences $G_n = (\lambda_n, \mu_n, \Sigma_n)$ and $G_{*,n} = (\lambda^*_n, \mu^*_n, \Sigma^*_n)$ such that $D(G_n, \overline{G}) = \lambda_n\|(\Delta\mu_n, \Delta\Sigma_n)\|^2 \to 0$, $D(G_{*,n}, \overline{G}) = \lambda^*_n\|(\Delta\mu^*_n, \Delta\Sigma^*_n)\|^2 \to 0$, and $\|p_{G_n} - p_{G_{*,n}}\|_\infty / D(G_n, G_{*,n}) \to 0$ as $n \to \infty$. For transparency of presentation, we denote $A_n = \|(\Delta\mu_n, \Delta\Sigma_n)\|$, $B_n = \|(\Delta\mu^*_n, \Delta\Sigma^*_n)\|$, and $C_n = \|(\mu_n,\Sigma_n) - (\mu^*_n,\Sigma^*_n)\| = \|(\Delta\mu_n,\Delta\Sigma_n) - (\Delta\mu^*_n,\Delta\Sigma^*_n)\|$. Now, there are three main cases regarding the convergence behaviors of $(\mu_n,\Sigma_n)$ and $(\mu^*_n,\Sigma^*_n)$.

Case 1: Both $A_n \to 0$ and $B_n \to 0$; i.e., $(\mu_n,\Sigma_n)$ and $(\mu^*_n,\Sigma^*_n)$ converge to $(\mu_0,\Sigma_0)$ as $n\to\infty$. Due to the symmetry between $\lambda_n$ and $\lambda^*_n$, we assume without loss of generality that $\lambda^*_n \ge \lambda_n$ for infinitely many $n$; without loss of generality, we replace these subsequences of $G_n, G_{*,n}$ by the whole sequences $G_n$ and $G_{*,n}$. Now, the formulation of $D(G_n, G_{*,n})$ is
$$D(G_n, G_{*,n}) = (\lambda^*_n - \lambda_n)B_n^2 + \bigl(\lambda_n A_n + \lambda^*_n B_n\bigr)C_n.$$
By means of a Taylor expansion up to the second order, we get
$$\frac{p_{G_n}(x) - p_{G_{*,n}}(x)}{D(G_n,G_{*,n})} = \frac{(\lambda^*_n - \lambda_n)\bigl[f(x|\mu_0,\Sigma_0) - f(x|\mu^*_n,\Sigma^*_n)\bigr] + \lambda_n\bigl[f(x|\mu_n,\Sigma_n) - f(x|\mu^*_n,\Sigma^*_n)\bigr]}{D(G_n,G_{*,n})}$$
$$= \frac{(\lambda^*_n - \lambda_n)\Bigl[\sum_{|\alpha|=1}^{2}\frac{(-\Delta\mu^*_n)^{\alpha_1}(-\Delta\Sigma^*_n)^{\alpha_2}}{\alpha!}\,\frac{\partial^{|\alpha|} f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu^*_n,\Sigma^*_n) + R_1(x)\Bigr]}{D(G_n,G_{*,n})} + \frac{\lambda_n\Bigl[\sum_{|\alpha|=1}^{2}\frac{(\Delta\mu_n-\Delta\mu^*_n)^{\alpha_1}(\Delta\Sigma_n-\Delta\Sigma^*_n)^{\alpha_2}}{\alpha!}\,\frac{\partial^{|\alpha|} f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu^*_n,\Sigma^*_n) + R_2(x)\Bigr]}{D(G_n,G_{*,n})},$$
where $R_1(x)$ and $R_2(x)$ are Taylor remainders that satisfy $R_1(x) = O(B_n^{2+\gamma})$ and $R_2(x) = O(C_n^{2+\gamma})$ for some positive number $\gamma$, due to the second-order uniform Lipschitz condition on the kernel density function $f$. From the formulation of $D(G_n, G_{*,n})$, since $A_n + B_n \ge C_n$ (triangle inequality), as $A_n \to 0$ and $B_n \to 0$ it is clear that
$$(\lambda^*_n - \lambda_n)|R_1(x)|/D(G_n,G_{*,n}) \le |R_1(x)|/B_n^2 = O(B_n^{\gamma}) \to 0, \qquad \lambda_n|R_2(x)|/D(G_n,G_{*,n}) \le |R_2(x)|/\{(A_n+B_n)C_n\} = O\bigl(C_n^{2+\gamma}/C_n^2\bigr) = O(C_n^{\gamma}) \to 0$$
as $n\to\infty$ for all $x \in \mathcal{X}$. Therefore, for all $x \in \mathcal{X}$,
$$\bigl[(\lambda^*_n - \lambda_n)|R_1(x)| + \lambda_n|R_2(x)|\bigr]/D(G_n, G_{*,n}) \to 0.$$
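As a side illustration of the remainder control just used (not part of the proof), one can check numerically that for a concrete smooth instance of $f$, here the univariate Gaussian kernel, the sup-norm error of the second-order Taylor expansion in $(\mu, v)$ decays at a cubic rate in the perturbation size. The grid and perturbation values below are arbitrary choices for the illustration.

```python
# Numerical sketch of the remainder bound R_2(x) = O(C_n^{2+gamma}): for the Gaussian
# kernel the second-order Taylor error in (mu, v) shrinks roughly like eps^3.
import numpy as np

def gauss(x, mu, v):
    return np.exp(-(x - mu) ** 2 / (2.0 * v)) / np.sqrt(2.0 * np.pi * v)

def taylor2(x, mu0, v0, dmu, dv):
    # Second-order expansion of f(x | mu0 + dmu, v0 + dv) around (mu0, v0).
    f = gauss(x, mu0, v0)
    z = x - mu0
    fmu = f * z / v0
    fv = f * (z ** 2 / (2 * v0 ** 2) - 1 / (2 * v0))
    fmumu = 2 * fv                       # heat-equation identity for the Gaussian
    fmuv = f * (z ** 3 / (2 * v0 ** 3) - 3 * z / (2 * v0 ** 2))
    fvv = f * (z ** 4 / (4 * v0 ** 4) - 3 * z ** 2 / (2 * v0 ** 3) + 3 / (4 * v0 ** 2))
    return (f + fmu * dmu + fv * dv
            + 0.5 * fmumu * dmu ** 2 + fmuv * dmu * dv + 0.5 * fvv * dv ** 2)

x = np.linspace(-5.0, 5.0, 2001)
mu0, v0 = 0.0, 1.0
for eps in [1e-1, 1e-2, 1e-3]:
    dmu, dv = 0.7 * eps, 0.5 * eps
    err = np.max(np.abs(gauss(x, mu0 + dmu, v0 + dv) - taylor2(x, mu0, v0, dmu, dv)))
    print(f"eps = {eps:.0e}   sup-norm Taylor error = {err:.3e}")
```

With these illustrative values, the printed errors drop by roughly a factor of $10^3$ for every factor-of-$10$ decrease in the perturbation, i.e., the cubic behavior behind $|R_2(x)|/C_n^2 = O(C_n^{\gamma})$ for this Gaussian instance.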
Hence, we can treat $[p_{G_n}(x)-p_{G_{*,n}}(x)]/D(G_n, G_{*,n})$ as a linear combination of $\frac{\partial^{|\alpha|} f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu^*_n,\Sigma^*_n)$ for all $x$ and $\alpha = (\alpha_1,\alpha_2)$ such that $1 \le |\alpha| \le 2$. Assume that all the coefficients of these terms go to 0 as $n\to\infty$. By studying the vanishing behaviors of the coefficients of $\frac{\partial^{|\alpha|} f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu^*_n,\Sigma^*_n)$ for $|\alpha| = 1$, we achieve the following limits:
$$\bigl[\lambda_n(\Delta\mu_n)_i - \lambda^*_n(\Delta\mu^*_n)_i\bigr]/D(G_n,G_{*,n}) \to 0, \qquad \bigl[\lambda_n(\Delta\Sigma_n)_{uv} - \lambda^*_n(\Delta\Sigma^*_n)_{uv}\bigr]/D(G_n,G_{*,n}) \to 0$$
for all $1 \le i \le d_1$ and $1 \le u,v \le d_2$, where $(a)_i$ denotes the $i$-th element of vector $a$ and $A_{uv}$ denotes the $(u,v)$-th element of matrix $A$. Furthermore, for any $1 \le i, j \le d_1$ ($i$ and $j$ can be equal), the coefficient of $\frac{\partial^{|\alpha|} f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu^*_n,\Sigma^*_n)$ with $(\alpha_1)_i = (\alpha_1)_j = 1$ and $\alpha_2 = 0$ leads to
$$\bigl[(\lambda^*_n-\lambda_n)(\Delta\mu^*_n)_i(\Delta\mu^*_n)_j + \lambda_n(\Delta\mu_n-\Delta\mu^*_n)_i(\Delta\mu_n-\Delta\mu^*_n)_j\bigr]/D(G_n,G_{*,n}) \to 0. \qquad (11)$$
When $i = j$, the above limits lead to
$$\bigl[(\lambda^*_n-\lambda_n)\{(\Delta\mu^*_n)_i\}^2 + \lambda_n\{(\Delta\mu_n-\Delta\mu^*_n)_i\}^2\bigr]/D(G_n,G_{*,n}) \to 0.$$
Therefore, we would have
$$\bigl[(\lambda^*_n-\lambda_n)\|\Delta\mu^*_n\|^2 + \lambda_n\|\Delta\mu_n-\Delta\mu^*_n\|^2\bigr]/D(G_n,G_{*,n}) \to 0. \qquad (12)$$
Now, as $\bigl[\lambda_n(\Delta\mu_n)_i - \lambda^*_n(\Delta\mu^*_n)_i\bigr]/D(G_n,G_{*,n}) \to 0$, we obtain
$$\bigl[\lambda_n(\Delta\mu_n)_i(\Delta\mu_n)_j - \lambda^*_n(\Delta\mu^*_n)_i(\Delta\mu_n)_j\bigr]/D(G_n,G_{*,n}) \to 0, \qquad \bigl[\lambda_n(\Delta\mu_n)_i(\Delta\mu^*_n)_j - \lambda^*_n(\Delta\mu^*_n)_i(\Delta\mu^*_n)_j\bigr]/D(G_n,G_{*,n}) \to 0. \qquad (13)$$
Plugging the results from (13) into (11), we ultimately achieve, for any $1 \le i,j \le d_1$, that
$$(\lambda^*_n-\lambda_n)(\Delta\mu^*_n)_i(\Delta\mu_n)_j/D(G_n,G_{*,n}) \to 0. \qquad (14)$$
Using the results from (11) and (14), we would have
$$\frac{\lambda_n(\Delta\mu_n)_i(\Delta\mu_n-\Delta\mu^*_n)_j}{D(G_n,G_{*,n})} - \frac{(\lambda^*_n-\lambda_n)(\Delta\mu_n)_i(\Delta\mu^*_n)_j}{D(G_n,G_{*,n})} \to 0, \qquad \frac{\lambda^*_n(\Delta\mu^*_n)_i(\Delta\mu_n-\Delta\mu^*_n)_j}{D(G_n,G_{*,n})} - \frac{(\lambda^*_n-\lambda_n)(\Delta\mu^*_n)_i(\Delta\mu_n)_j}{D(G_n,G_{*,n})} \to 0$$
for any $1 \le i,j \le d_1$, where the subtracted terms themselves go to 0. Therefore, it leads to
$$\sum_{1\le i,j\le d_1}\frac{\lambda_n|(\Delta\mu_n)_i|\,|(\Delta\mu_n-\Delta\mu^*_n)_j|}{D(G_n,G_{*,n})} = \frac{\lambda_n\bigl(\sum_{1\le i\le d_1}|(\Delta\mu_n)_i|\bigr)\bigl(\sum_{1\le i\le d_1}|(\Delta\mu_n-\Delta\mu^*_n)_i|\bigr)}{D(G_n,G_{*,n})} \to 0,$$
$$\sum_{1\le i,j\le d_1}\frac{\lambda^*_n|(\Delta\mu^*_n)_i|\,|(\Delta\mu_n-\Delta\mu^*_n)_j|}{D(G_n,G_{*,n})} = \frac{\lambda^*_n\bigl(\sum_{1\le i\le d_1}|(\Delta\mu^*_n)_i|\bigr)\bigl(\sum_{1\le i\le d_1}|(\Delta\mu_n-\Delta\mu^*_n)_i|\bigr)}{D(G_n,G_{*,n})} \to 0.$$
The above results mean that
$$\lambda_n\|\Delta\mu_n\|\,\|\Delta\mu_n-\Delta\mu^*_n\|/D(G_n,G_{*,n}) \to 0, \qquad \lambda^*_n\|\Delta\mu^*_n\|\,\|\Delta\mu_n-\Delta\mu^*_n\|/D(G_n,G_{*,n}) \to 0. \qquad (15)$$
By applying the above argument to the coefficients of $\frac{\partial^{|\alpha|} f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu^*_n,\Sigma^*_n)$ when $\alpha_1 = 0$ and $(\alpha_2)_{u_1v_1} = (\alpha_2)_{u_2v_2} = 1$ for any two pairs $(u_1,v_1), (u_2,v_2)$ (not necessarily distinct) with $1 \le u_1, u_2, v_1, v_2 \le d_2$, or when $(\alpha_1)_i = 1$ and $(\alpha_2)_{uv} = 1$ for any $1 \le i \le d_1$ and $1 \le u,v \le d_2$, we respectively obtain
$$\bigl[(\lambda^*_n-\lambda_n)\|\Delta\Sigma^*_n\|^2 + \lambda_n\|\Delta\Sigma_n-\Delta\Sigma^*_n\|^2\bigr]/D(G_n,G_{*,n}) \to 0, \qquad \lambda_n\|\Delta\Sigma_n\|\,\|\Delta\Sigma_n-\Delta\Sigma^*_n\|/D(G_n,G_{*,n}) \to 0,$$
$$\lambda^*_n\|\Delta\Sigma^*_n\|\,\|\Delta\Sigma_n-\Delta\Sigma^*_n\|/D(G_n,G_{*,n}) \to 0, \qquad \lambda_n\|\Delta\mu_n\|\,\|\Delta\Sigma_n-\Delta\Sigma^*_n\|/D(G_n,G_{*,n}) \to 0, \qquad \lambda^*_n\|\Delta\mu^*_n\|\,\|\Delta\Sigma_n-\Delta\Sigma^*_n\|/D(G_n,G_{*,n}) \to 0. \qquad (16)$$
Combining the results from (12), (15), and (16) leads to $1 = D(G_n,G_{*,n})/D(G_n,G_{*,n}) \to 0$, which is a contradiction. As a consequence, not all of the coefficients of $\frac{\partial^{|\alpha|} f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu^*_n,\Sigma^*_n)$, $1 \le |\alpha| \le 2$, go to 0. Following the argument of Proposition 2, and denoting by $m_n$ the maximum of the absolute values of these coefficients, we achieve for all $x$ that
$$\frac{1}{m_n}\,\frac{p_{G_n}(x)-p_{G_{*,n}}(x)}{D(G_n,G_{*,n})} \to \sum_{|\alpha|=1}^{2}\tau_\alpha\,\frac{\partial^{|\alpha|} f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu_0,\Sigma_0) = 0,$$
where $\tau_\alpha \in \mathbb{R}$ are coefficients that are not all 0. Due to the second-order identifiability condition on $f$, the above equation implies that $\tau_\alpha = 0$ for all $1 \le |\alpha| \le 2$, which is a contradiction. As a consequence, Case 1 cannot happen.

Case 2: Exactly one of $A_n$ and $B_n$ goes to 0; i.e., exactly one of $(\mu_n,\Sigma_n)$ and $(\mu^*_n,\Sigma^*_n)$ does not converge to $(\mu_0,\Sigma_0)$ as $n\to\infty$.
Due to the symmetry of $A_n$ and $B_n$, we assume without loss of generality that $A_n \not\to 0$ and $B_n \to 0$; that is (up to a subsequence), $(\mu_n,\Sigma_n) \to (\mu',\Sigma') \neq (\mu_0,\Sigma_0)$ while $(\mu^*_n,\Sigma^*_n) \to (\mu_0,\Sigma_0)$ as $n\to\infty$. We denote $D'(G_n,G_{*,n}) = |\lambda^*_n-\lambda_n|B_n + \lambda_nA_n + \lambda^*_nB_n$. Since $[p_{G_n}(x)-p_{G_{*,n}}(x)]/D(G_n,G_{*,n}) \to 0$ and $D(G_n,G_{*,n}) \lesssim D'(G_n,G_{*,n})$, we obtain $[p_{G_n}(x)-p_{G_{*,n}}(x)]/D'(G_n,G_{*,n}) \to 0$ for all $x$. By means of a Taylor expansion up to the first order, we have
$$\frac{p_{G_n}(x)-p_{G_{*,n}}(x)}{D'(G_n,G_{*,n})} = \frac{(\lambda^*_n-\lambda_n)\bigl[f(x|\mu_0,\Sigma_0)-f(x|\mu^*_n,\Sigma^*_n)\bigr] + \lambda_nf(x|\mu_n,\Sigma_n) - \lambda_nf(x|\mu^*_n,\Sigma^*_n)}{D'(G_n,G_{*,n})}$$
$$= \frac{(\lambda^*_n-\lambda_n)\Bigl[\sum_{|\alpha|=1}\frac{(-\Delta\mu^*_n)^{\alpha_1}(-\Delta\Sigma^*_n)^{\alpha_2}}{\alpha!}\,\frac{\partial f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu^*_n,\Sigma^*_n) + R'_1(x)\Bigr]}{D'(G_n,G_{*,n})} + \frac{\lambda_nf(x|\mu_n,\Sigma_n) - \lambda_nf(x|\mu^*_n,\Sigma^*_n)}{D'(G_n,G_{*,n})},$$
where $R'_1(x)$ is a Taylor remainder satisfying $(\lambda^*_n-\lambda_n)|R'_1(x)|/D'(G_n,G_{*,n}) = O(B_n^{\gamma'}) \to 0$ for some positive number $\gamma' > 0$. Since $(\mu_n,\Sigma_n)$ and $(\mu^*_n,\Sigma^*_n)$ do not have the same limit, they are distinct once $n$ is large enough, i.e., $n \ge M'$ for some value $M'$. Now, for $n \ge M'$, $[p_{G_n}(x)-p_{G_{*,n}}(x)]/D'(G_n,G_{*,n})$ becomes a linear combination of $\frac{\partial^{|\alpha|} f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu^*_n,\Sigma^*_n)$ for all $|\alpha| \le 1$ and of $f(x|\mu_n,\Sigma_n)$. If all of the coefficients of these terms went to 0, we would have $\lambda_n/D'(G_n,G_{*,n}) \to 0$, $(\lambda^*_n-\lambda_n)(-\Delta\mu^*_n)_i/D'(G_n,G_{*,n}) \to 0$, and $(\lambda^*_n-\lambda_n)(-\Delta\Sigma^*_n)_{uv}/D'(G_n,G_{*,n}) \to 0$ for all $1 \le i \le d_1$ and $1 \le u,v \le d_2$. This would imply that $(\lambda^*_n-\lambda_n)B_n/D'(G_n,G_{*,n}) \to 0$, $\lambda_nA_n/D'(G_n,G_{*,n}) \to 0$, and $\lambda_nB_n/D'(G_n,G_{*,n}) \to 0$. These results lead to
$$1 = \bigl[|\lambda^*_n-\lambda_n|B_n + \lambda_nA_n + \lambda^*_nB_n\bigr]/D'(G_n,G_{*,n}) \to 0,$$
a contradiction. Therefore, not all of the coefficients of $\frac{\partial^{|\alpha|} f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu^*_n,\Sigma^*_n)$ and $f(x|\mu_n,\Sigma_n)$ go to 0.
By defining $m'_n$ to be the maximum of the absolute values of these coefficients, we achieve for all $x$ that
$$\frac{1}{m'_n}\,\frac{p_{G_n}(x)-p_{G_{*,n}}(x)}{D'(G_n,G_{*,n})} \to \eta'f(x|\mu_0,\Sigma_0) + \sum_{|\alpha|=0}^{1}\tau'_\alpha\,\frac{\partial^{|\alpha|}f}{\partial\mu^{\alpha_1}\partial\Sigma^{\alpha_2}}(x|\mu',\Sigma') = 0,$$
where $\eta'$ and $\tau'_\alpha$ are coefficients that are not all 0, which contradicts the first-order identifiability of $f$. As a consequence, Case 2 cannot hold.

Case 3: Neither $A_n$ nor $B_n$ goes to 0; i.e., $(\mu_n,\Sigma_n)$ and $(\mu^*_n,\Sigma^*_n)$ do not converge to $(\mu_0,\Sigma_0)$ as $n\to\infty$. Since $D(G_n,G_{*,n}) \lesssim K(G_n,G_{*,n}) = |\lambda_n-\lambda^*_n| + (\lambda_n+\lambda^*_n)C_n$ and $[p_{G_n}(x)-p_{G_{*,n}}(x)]/D(G_n,G_{*,n}) \to 0$, we obtain $[p_{G_n}(x)-p_{G_{*,n}}(x)]/K(G_n,G_{*,n}) \to 0$ for all $x$. From here, by the same argument as in the proof of Proposition 2, we again reach a contradiction. Therefore, Case 3 cannot happen. In sum, we achieve the conclusion of the proposition.

A.3 Proof of Theorem 4

For simplicity of the proof argument, we only consider the univariate setting of the Gaussian kernel, i.e., when both $\mu$ and $\Sigma = \sigma^2$ are scalars; the argument for the multivariate setting of the Gaussian kernel is rather similar and is omitted. Throughout this proof, we denote $v := \sigma^2$. Now, according to the proof arguments of Proposition 2 and Proposition 3, to achieve the conclusion of the theorem it suffices to demonstrate the following result.
Proposition 4. Let $\overline{G} = (\bar\lambda, \bar\mu, \bar v)$ with $\bar\lambda \in [0,1]$, where $(\bar\mu,\bar v)$ may be identical to $(\mu_0, v_0)$. Then the following holds.

(a) If $(\mu_0,v_0) \neq (\bar\mu,\bar v)$ and $\bar\lambda > 0$, then
$$\lim_{\epsilon\to0}\,\inf_{G,G_*}\Bigl\{\frac{\|p_G-p_{G_*}\|_\infty}{K(G,G_*)} : K(G,\overline{G}) \vee K(G_*,\overline{G}) \le \epsilon\Bigr\} > 0.$$

(b) If $(\mu_0,v_0) \equiv (\bar\mu,\bar v)$, or $(\mu_0,v_0) \neq (\bar\mu,\bar v)$ and $\bar\lambda = 0$, then
$$\lim_{\epsilon\to0}\,\inf_{G,G_*}\Bigl\{\frac{\|p_G-p_{G_*}\|_\infty}{Q(G,G_*)} : Q(G,\overline{G}) \vee Q(G_*,\overline{G}) \le \epsilon\Bigr\} > 0.$$

Proof. We only provide the proof for part (b), since part (a) can be argued in a similar fashion to Proposition 2. For transparency of the argument, we also assume that $(\mu_0,v_0) \equiv (\bar\mu,\bar v)$ and that $T$ is the identity mapping, i.e., $(\mu_0,v_0) = (\theta_0,v_0)$, $\overline{G} = (\bar\lambda,\theta_0,v_0)$, and $h_0(x|\theta_0,v_0) = f(x|\theta_0,v_0)$ for all $x \in \mathcal{X}$.

Assume that the conclusion of Proposition 4 does not hold. Then we can find two sequences $G_n = (\lambda_n,\mu_n,v_n)$ and $G_{*,n} = (\lambda^*_n,\mu^*_n,v^*_n)$ such that $Q(G_n,\overline{G}) \to 0$, $Q(G_{*,n},\overline{G}) \to 0$, and $\|p_{G_n}-p_{G_{*,n}}\|_\infty/Q(G_n,G_{*,n}) \to 0$ as $n\to\infty$. Due to the symmetry between $\lambda_n$ and $\lambda^*_n$, we can assume without loss of generality that $\lambda^*_n \ge \lambda_n$. Therefore, we achieve
$$Q(G_n,G_{*,n}) = (\lambda^*_n-\lambda_n)\bigl(|\Delta\mu^*_n|^4 + |\Delta v^*_n|^2\bigr) + \bigl[\lambda_n(|\Delta\mu_n|^2+|\Delta v_n|) + \lambda^*_n(|\Delta\mu^*_n|^2+|\Delta v^*_n|)\bigr]\bigl[|\mu_n-\mu^*_n|^2 + |v_n-v^*_n|\bigr].$$
In this proof, we only consider the scenario where $\|(\Delta\mu_n,\Delta v_n)\| \to 0$ and $\|(\Delta\mu^*_n,\Delta v^*_n)\| \to 0$, since the arguments for the other settings of these two terms are similar to those of Case 2 and Case 3 in the proof of Proposition 3. As indicated in Section 3.2.2, the univariate Gaussian kernel possesses the partial differential equation structure
$$\frac{\partial^2 f}{\partial\mu^2}(x|\mu,v) = 2\,\frac{\partial f}{\partial v}(x|\mu,v)$$
for all $\mu \in \Theta$ and $v \in \Omega$. Therefore, for any $\alpha = (\alpha_1,\alpha_2)$ we can check that
$$\frac{\partial^{|\alpha|}f}{\partial\mu^{\alpha_1}\partial v^{\alpha_2}}(x|\mu,v) = \frac{1}{2^{\alpha_2}}\,\frac{\partial^{\beta}f}{\partial\mu^{\beta}}(x|\mu,v), \qquad \text{where } \beta = \alpha_1 + 2\alpha_2.$$
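The heat-equation identity above, and the resulting reduction of mixed $(\mu,v)$-derivatives to pure $\mu$-derivatives, can be sanity-checked symbolically. The following is a small illustrative sketch (it assumes sympy is available; it is not part of the proof):

```python
import sympy as sp

x, mu = sp.symbols('x mu', real=True)
v = sp.symbols('v', positive=True)
f = sp.exp(-(x - mu) ** 2 / (2 * v)) / sp.sqrt(2 * sp.pi * v)

# Heat-equation structure of the Gaussian kernel: d^2 f / d mu^2 = 2 * d f / d v.
print(sp.simplify(sp.diff(f, mu, 2) - 2 * sp.diff(f, v)))   # expected: 0

# Consequence used in the proof: for alpha = (alpha1, alpha2) and beta = alpha1 + 2*alpha2,
# d^{|alpha|} f / (d mu^{alpha1} d v^{alpha2}) = 2^{-alpha2} * d^beta f / d mu^beta.
a1, a2 = 1, 2
beta = a1 + 2 * a2
print(sp.simplify(sp.diff(f, mu, a1, v, a2) - sp.diff(f, mu, beta) / 2 ** a2))  # expected: 0
```

The check for $\alpha = (1,2)$, $\beta = 5$ is only one instance; the general statement follows by iterating the heat-equation identity $\alpha_2$ times.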
Now, by means of a Taylor expansion up to the fourth order, we obtain
$$\frac{p_{G_n}(x)-p_{G_{*,n}}(x)}{Q(G_n,G_{*,n})} = \frac{(\lambda^*_n-\lambda_n)\Bigl[\sum_{|\alpha|=1}^{4}\frac{(-\Delta\mu^*_n)^{\alpha_1}(-\Delta v^*_n)^{\alpha_2}}{\alpha_1!\,\alpha_2!}\,\frac{\partial^{|\alpha|}f}{\partial\mu^{\alpha_1}\partial v^{\alpha_2}}(x|\mu^*_n,v^*_n) + R_1(x)\Bigr]}{Q(G_n,G_{*,n})} + \frac{\lambda_n\Bigl[\sum_{|\alpha|=1}^{4}\frac{(\Delta\mu_n-\Delta\mu^*_n)^{\alpha_1}(\Delta v_n-\Delta v^*_n)^{\alpha_2}}{\alpha_1!\,\alpha_2!}\,\frac{\partial^{|\alpha|}f}{\partial\mu^{\alpha_1}\partial v^{\alpha_2}}(x|\mu^*_n,v^*_n) + R_2(x)\Bigr]}{Q(G_n,G_{*,n})}$$
$$= \sum_{\beta=1}^{8}\ \sum_{\alpha_1+2\alpha_2=\beta}\frac{(\lambda^*_n-\lambda_n)(-\Delta\mu^*_n)^{\alpha_1}(-\Delta v^*_n)^{\alpha_2} + \lambda_n(\Delta\mu_n-\Delta\mu^*_n)^{\alpha_1}(\Delta v_n-\Delta v^*_n)^{\alpha_2}}{2^{\alpha_2}\,\alpha_1!\,\alpha_2!\,Q(G_n,G_{*,n})}\,\frac{\partial^{\beta}f}{\partial\mu^{\beta}}(x|\mu^*_n,v^*_n) + \frac{(\lambda^*_n-\lambda_n)R_1(x) + \lambda_nR_2(x)}{Q(G_n,G_{*,n})},$$
where $R_1(x), R_2(x)$ are Taylor remainders and the range of $(\alpha_1,\alpha_2)$ in the summation of the second equality satisfies $\beta = \alpha_1 + 2\alpha_2$. As the Gaussian kernel admits a fourth-order uniform Lipschitz condition, it is clear that
$$\frac{(\lambda^*_n-\lambda_n)|R_1(x)| + \lambda_n|R_2(x)|}{Q(G_n,G_{*,n})} = O\bigl(\|(\Delta\mu^*_n,\Delta v^*_n)\|^{\gamma} + \|(\mu_n,v_n)-(\mu^*_n,v^*_n)\|^{\gamma}\bigr) \to 0$$
as $n\to\infty$ for some $\gamma > 0$. Therefore, we can consider $[p_{G_n}(x)-p_{G_{*,n}}(x)]/Q(G_n,G_{*,n})$ as a linear combination of $\frac{\partial^{\beta}f}{\partial\mu^{\beta}}(x|\mu^*_n,v^*_n)$ for $1 \le \beta \le 8$. If all of the coefficients of these terms go to 0, then we obtain
$$L_\beta := \sum_{\alpha_1+2\alpha_2=\beta}\frac{(\lambda^*_n-\lambda_n)(-\Delta\mu^*_n)^{\alpha_1}(-\Delta v^*_n)^{\alpha_2} + \lambda_n(\Delta\mu_n-\Delta\mu^*_n)^{\alpha_1}(\Delta v_n-\Delta v^*_n)^{\alpha_2}}{2^{\alpha_2}\,\alpha_1!\,\alpha_2!\,Q(G_n,G_{*,n})} \to 0$$
for any $1 \le \beta \le 8$.
Now, we divide the argument for $L_\beta$ into two key cases.

Case 1: $\bigl[\lambda_n(|\Delta\mu_n|^2+|\Delta v_n|) + \lambda^*_n(|\Delta\mu^*_n|^2+|\Delta v^*_n|)\bigr]/\bigl[\lambda_n(|\mu_n-\mu^*_n|^2+|v_n-v^*_n|)\bigr] \not\to \infty$. This implies that, for $n$ large enough,
$$Q(G_n,G_{*,n}) \lesssim (\lambda^*_n-\lambda_n)\bigl(|\Delta\mu^*_n|^4+|\Delta v^*_n|^2\bigr) + \lambda_n\bigl(|\Delta\mu_n-\Delta\mu^*_n|^4+|\Delta v_n-\Delta v^*_n|^2\bigr).$$
Combining this with $L_\beta \to 0$ for all $1 \le \beta \le 8$, we get
$$H_\beta := \sum_{\alpha_1+2\alpha_2=\beta}\frac{(\lambda^*_n-\lambda_n)(-\Delta\mu^*_n)^{\alpha_1}(-\Delta v^*_n)^{\alpha_2} + \lambda_n(\Delta\mu_n-\Delta\mu^*_n)^{\alpha_1}(\Delta v_n-\Delta v^*_n)^{\alpha_2}}{2^{\alpha_2}\,\alpha_1!\,\alpha_2!\,\bigl[(\lambda^*_n-\lambda_n)(|\Delta\mu^*_n|^4+|\Delta v^*_n|^2) + \lambda_n(|\Delta\mu_n-\Delta\mu^*_n|^4+|\Delta v_n-\Delta v^*_n|^2)\bigr]} \to 0.$$
Note that when the denominator of these limits is $(\lambda^*_n-\lambda_n)(|\Delta\mu^*_n|^4+|\Delta v^*_n|^4) + \lambda_n(|\Delta\mu_n-\Delta\mu^*_n|^4+|\Delta v_n-\Delta v^*_n|^4)$, the technique for studying this system of limits has been considered in Proposition 2.3 of [16]. However, since the current denominator of $H_\beta$ dominates that one, we must develop a more delicate control of $H_\beta$ for $1 \le \beta \le 8$ to obtain a concrete understanding of these limits. Due to the symmetry between $\lambda^*_n-\lambda_n$ and $\lambda_n$, we assume without loss of generality that $\lambda^*_n-\lambda_n \le \lambda_n$ for all $n$ (by a subsequence argument). We have two possibilities regarding $\lambda_n$ and $\lambda^*_n$.

Case 1.1: $(\lambda^*_n-\lambda_n)/\lambda_n \not\to 0$ as $n\to\infty$. Under this setting, we define $p_n = \max\{\lambda^*_n-\lambda_n, \lambda_n\}$ and
$$M_n = \max\bigl\{|\Delta\mu^*_n|,\ |\Delta\mu_n-\Delta\mu^*_n|,\ |\Delta v^*_n|^{1/2},\ |\Delta v_n-\Delta v^*_n|^{1/2}\bigr\}.$$
Additionally, we let (along a further subsequence) $(\lambda^*_n-\lambda_n)/p_n \to c_1^2$, $\lambda_n/p_n \to c_2^2$, $-\Delta\mu^*_n/M_n \to a_1$, $(\Delta\mu_n-\Delta\mu^*_n)/M_n \to a_2$, $-\Delta v^*_n/M_n^2 \to 2b_1$, and $(\Delta v_n-\Delta v^*_n)/M_n^2 \to 2b_2$. From here, at least one among $a_1, a_2, b_1, b_2$ is nonzero, and both $c_1$ and $c_2$ are nonzero. Now, by dividing both the numerators and the denominators of $H_\beta$, for $1 \le \beta \le 4$, by $p_nM_n^\beta$, we arrive at the following system of polynomial equations:
$$c_1^2a_1 + c_2^2a_2 = 0,$$
$$\tfrac{1}{2}\bigl(c_1^2a_1^2 + c_2^2a_2^2\bigr) + c_1^2b_1 + c_2^2b_2 = 0,$$
$$\tfrac{1}{3!}\bigl(c_1^2a_1^3 + c_2^2a_2^3\bigr) + c_1^2a_1b_1 + c_2^2a_2b_2 = 0,$$
$$\tfrac{1}{4!}\bigl(c_1^2a_1^4 + c_2^2a_2^4\bigr) + \tfrac{1}{2!}\bigl(c_1^2a_1^2b_1 + c_2^2a_2^2b_2\bigr) + \tfrac{1}{2!}\bigl(c_1^2b_1^2 + c_2^2b_2^2\bigr) = 0.$$
As indicated in Proposition 2.1 of [16], this system admits only the trivial solution, i.e., $a_1 = a_2 = b_1 = b_2 = 0$, which is a contradiction. Therefore, Case 1.1 cannot happen.
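To make the passage from $H_\beta \to 0$ to the polynomial system above concrete, here is the bookkeeping for $\beta = 1$ and $\beta = 2$ (a worked step using only the definitions above; the $\beta = 3, 4$ equations follow in the same way):
$$\frac{(\lambda^*_n-\lambda_n)(-\Delta\mu^*_n) + \lambda_n(\Delta\mu_n-\Delta\mu^*_n)}{p_nM_n} \;\to\; c_1^2a_1 + c_2^2a_2,$$
$$\frac{1}{2}\,\frac{(\lambda^*_n-\lambda_n)(\Delta\mu^*_n)^2 + \lambda_n(\Delta\mu_n-\Delta\mu^*_n)^2}{p_nM_n^2} + \frac{1}{2}\,\frac{(\lambda^*_n-\lambda_n)(-\Delta v^*_n) + \lambda_n(\Delta v_n-\Delta v^*_n)}{p_nM_n^2} \;\to\; \tfrac{1}{2}\bigl(c_1^2a_1^2 + c_2^2a_2^2\bigr) + c_1^2b_1 + c_2^2b_2.$$
Since each $H_\beta \to 0$ while the denominator of $H_\beta$ divided by $p_nM_n^\beta$ stays bounded (it is at most a constant multiple of $M_n^{4-\beta} \le 1$ for $\beta \le 4$ and $M_n \to 0$), both displayed limits must equal $0$, which are exactly the first two equations of the system.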
Case 1.2: $(\lambda^*_n-\lambda_n)/\lambda_n \to 0$, i.e., $\lambda^*_n/\lambda_n \to 1$, as $n\to\infty$. Under this setting, if $M_n \in \{|\Delta\mu_n-\Delta\mu^*_n|, |\Delta v_n-\Delta v^*_n|^{1/2}\}$ (that is, the maximum in the definition of $M_n$ is attained by one of these two terms), then we have
$$\lambda_nM_n^4 = \max\bigl\{\lambda_n|\Delta\mu_n-\Delta\mu^*_n|^4,\ \lambda_n|\Delta v_n-\Delta v^*_n|^2\bigr\} \ \ge\ \max\bigl\{(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^4,\ (\lambda^*_n-\lambda_n)|\Delta\mu_n-\Delta\mu^*_n|^4,\ \lambda_n|\Delta v^*_n|^2,\ \lambda_n|\Delta v_n-\Delta v^*_n|^2\bigr\}.$$
By dividing both the numerator and the denominator of $H_1$ by $\lambda_nM_n$, and noting that the new denominator of $H_1$ goes to 0 (in particular it stays bounded), the new numerator must also go to 0, i.e.,
$$(\lambda^*_n-\lambda_n)(-\Delta\mu^*_n)/\{\lambda_nM_n\} + (\Delta\mu_n-\Delta\mu^*_n)/M_n \to 0.$$
Since $(\lambda^*_n-\lambda_n)/\lambda_n \to 0$ and $|\Delta\mu^*_n| \le M_n$, we have $(\lambda^*_n-\lambda_n)(-\Delta\mu^*_n)/\{\lambda_nM_n\} \to 0$. Therefore, $(\Delta\mu_n-\Delta\mu^*_n)/M_n \to 0$.
With the previous results, dividing both the numerator and the denominator of $H_2$ by $\lambda_nM_n^2$ (the new denominator again goes to 0), we have
$$(\lambda^*_n-\lambda_n)(-\Delta v^*_n)/\{\lambda_nM_n^2\} + (\Delta v_n-\Delta v^*_n)/M_n^2 \to 0.$$
As $(\lambda^*_n-\lambda_n)(-\Delta v^*_n)/\{\lambda_nM_n^2\} \to 0$ (due to the assumption on $M_n$), we get $(\Delta v_n-\Delta v^*_n)/M_n^2 \to 0$. These results imply that
$$1 = \max\bigl\{|\Delta\mu_n-\Delta\mu^*_n|^2,\ |\Delta v_n-\Delta v^*_n|\bigr\}/M_n^2 \to 0,$$
which is a contradiction. Therefore, we can only have $M_n \in \{|\Delta\mu^*_n|, |\Delta v^*_n|^{1/2}\}$. For simplicity of the proof, we only consider the setting $M_n = |\Delta\mu^*_n|$ for all $n$ (by a subsequence argument); the setting $M_n = |\Delta v^*_n|^{1/2}$ for all $n$ can be argued in a similar fashion. Now, if $\max\{|\Delta\mu_n-\Delta\mu^*_n|, |\Delta v_n-\Delta v^*_n|^{1/2}\}/M_n \not\to 0$, then by dividing the numerator and denominator of $H_i$ by $\lambda_n\bigl[\max\{|\Delta\mu_n-\Delta\mu^*_n|, |\Delta v_n-\Delta v^*_n|^{1/2}\}\bigr]^i$ for $1 \le i \le 2$, we would likewise achieve
$$1 = \frac{\max\{|\Delta\mu_n-\Delta\mu^*_n|^2,\ |\Delta v_n-\Delta v^*_n|\}}{\max\{|\Delta\mu_n-\Delta\mu^*_n|^2,\ |\Delta v_n-\Delta v^*_n|\}} \to 0,$$
a contradiction. Therefore, we must have
$$\max\{|\Delta\mu_n-\Delta\mu^*_n|,\ |\Delta v_n-\Delta v^*_n|^{1/2}\}/M_n \to 0 \qquad (17)$$
as $n\to\infty$. Now, we further divide the argument under this setting of $M_n$ into two smaller cases.

Case 1.2.1: $(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^4 \le \lambda_n|\Delta\mu_n-\Delta\mu^*_n|^4$ for all $n$ (by a subsequence argument). Since $M_n = |\Delta\mu^*_n|$, we would have $(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^i \le \lambda_n|\Delta\mu_n-\Delta\mu^*_n|^i$ for all $n$ and $1 \le i \le 4$. From here, we obtain
$$\frac{(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^4}{\lambda_n|\Delta\mu_n-\Delta\mu^*_n|} \le \frac{\lambda_n|\Delta\mu_n-\Delta\mu^*_n|^4}{\lambda_n|\Delta\mu_n-\Delta\mu^*_n|} \to 0, \qquad \frac{(\lambda^*_n-\lambda_n)|\Delta v^*_n|^2}{\lambda_n|\Delta\mu_n-\Delta\mu^*_n|} \le \frac{(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^4}{\lambda_n|\Delta\mu_n-\Delta\mu^*_n|} \to 0.$$
If $|\Delta\mu_n-\Delta\mu^*_n|/|\Delta v_n-\Delta v^*_n|^{1/2} \not\to 0$, then, dividing both the numerator and the denominator of $H_1$ by $\lambda_n|\Delta\mu_n-\Delta\mu^*_n|$ and noting that the new denominator goes to 0, the new numerator must converge to 0 as well, i.e.,
$$(\lambda^*_n-\lambda_n)(-\Delta\mu^*_n)/\{\lambda_n(\Delta\mu_n-\Delta\mu^*_n)\} \to -1.$$
However, since $|\Delta\mu_n-\Delta\mu^*_n|/|\Delta\mu^*_n| \to 0$ by (17), the above result would imply that $(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^4/\{\lambda_n|\Delta\mu_n-\Delta\mu^*_n|^4\} \to \infty$, which contradicts the assumption of Case 1.2.1. As a consequence, we must have $|\Delta\mu_n-\Delta\mu^*_n|/|\Delta v_n-\Delta v^*_n|^{1/2} \to 0$. Now, we also have
$$\frac{(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^4}{\lambda_n|\Delta v_n-\Delta v^*_n|^{i/2}} \le \frac{\lambda_n|\Delta\mu_n-\Delta\mu^*_n|^4}{\lambda_n|\Delta v_n-\Delta v^*_n|^{i/2}} \lesssim \frac{\lambda_n|\Delta v_n-\Delta v^*_n|^2}{\lambda_n|\Delta v_n-\Delta v^*_n|^{i/2}} \to 0, \qquad \frac{(\lambda^*_n-\lambda_n)|\Delta v^*_n|^2}{\lambda_n|\Delta v_n-\Delta v^*_n|^{i/2}} \le \frac{(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^4}{\lambda_n|\Delta v_n-\Delta v^*_n|^{i/2}} \to 0$$
for all $1 \le i \le 3$. Without loss of generality, we assume that $\Delta v_n - \Delta v^*_n > 0$ for all $n$. We write $-\Delta\mu^*_n = q^n_1(\Delta v_n-\Delta v^*_n)^{1/2}$ and $-\Delta v^*_n = q^n_2(\Delta v_n-\Delta v^*_n)$ for all $n$. From the result of (17), we have $|q^n_1| \to \infty$. Given the above results, dividing the numerators and the denominators of $H_\beta$ by $\lambda_n(\Delta v_n-\Delta v^*_n)^{\beta/2}$ for any $1 \le \beta \le 3$, the new denominators go to 0; therefore, all the new numerators of these $H_\beta$ also go to 0, i.e.,
$$\frac{\lambda^*_n-\lambda_n}{\lambda_n}\,q^n_1 \to 0, \qquad \frac{\lambda^*_n-\lambda_n}{\lambda_n}\bigl[(q^n_1)^2 + q^n_2\bigr] + 1 \to 0, \qquad \frac{\lambda^*_n-\lambda_n}{\lambda_n}\Bigl[\frac{(q^n_1)^3}{6} + \frac{q^n_1q^n_2}{2}\Bigr] \to 0.$$
Since $|q^n_1| \to \infty$, the last limit in the above system implies that $\frac{\lambda^*_n-\lambda_n}{\lambda_n}\bigl[(q^n_1)^2/3 + q^n_2\bigr] \to 0$. Combining this result with the second limit in the above system yields $(\lambda^*_n-\lambda_n)(q^n_1)^2/\lambda_n + 3/2 \to 0$, which cannot happen since the left-hand side is at least $3/2$. Therefore, Case 1.2.1 does not hold.

Case 1.2.2: $(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^4 > \lambda_n|\Delta\mu_n-\Delta\mu^*_n|^4$ for all $n$ (by a subsequence argument). If $(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^4 \le \lambda_n|\Delta v_n-\Delta v^*_n|^2$ for all $n$, then by using the same argument as in Case 1.2.1 we quickly reach a contradiction. Therefore, we must have $(\lambda^*_n-\lambda_n)|\Delta\mu^*_n|^4 > \lambda_n|\Delta v_n-\Delta v^*_n|^2$. Write $\Delta\mu_n-\Delta\mu^*_n = m^n_1(-\Delta\mu^*_n)$, $-\Delta v^*_n = m^n_2(\Delta\mu^*_n)^2$, and $\Delta v_n-\Delta v^*_n = m^n_3(\Delta\mu^*_n)^2$. Since $M_n = |\Delta\mu^*_n|$, we have $|m^n_i| \le 1$ for all $1 \le i \le 3$. Denote $m^n_i \to m_i$ for $1 \le i \le 3$ (by a subsequence argument). The result of (17) leads to $m_1 = m_3 = 0$.
Now, dividing both the numerator and the denominator of $H_\beta$ by $(\lambda^*_n-\lambda_n)(-\Delta\mu^*_n)^\beta$ for any $1 \le \beta \le 4$, and noting that the new denominators do not go to $\infty$, we also obtain that the new numerators of $H_\beta$ go to 0, i.e., the following system of limits holds:
$$1 + \frac{\lambda_n}{\lambda^*_n-\lambda_n}\,m^n_1 \to 0,$$
$$\Bigl[1 + \frac{\lambda_n}{\lambda^*_n-\lambda_n}(m^n_1)^2\Bigr] + m^n_2 + \frac{\lambda_n}{\lambda^*_n-\lambda_n}\,m^n_3 \to 0,$$
$$\Bigl[1 + \frac{\lambda_n}{\lambda^*_n-\lambda_n}(m^n_1)^3\Bigr]\Big/6 + \Bigl[m^n_2 + \frac{\lambda_n}{\lambda^*_n-\lambda_n}\,m^n_1m^n_3\Bigr]\Big/2 \to 0,$$
$$\Bigl[1 + \frac{\lambda_n}{\lambda^*_n-\lambda_n}(m^n_1)^4\Bigr]\Big/24 + \Bigl[m^n_2 + \frac{\lambda_n}{\lambda^*_n-\lambda_n}(m^n_1)^2m^n_3\Bigr]\Big/4 + \Bigl[(m^n_2)^2 + \frac{\lambda_n}{\lambda^*_n-\lambda_n}(m^n_3)^2\Bigr]\Big/8 \to 0.$$
Combining with $m^n_1 \to 0$, the first and third limits of the above system imply that $m_2 = -1/3$. From here, the second and fourth limits yield $1/6 + m_2 + m_2^2/2 = 0$, which is a contradiction. Therefore, Case 1.2.2 cannot hold.
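To make the final step explicit (a worked verification using only the four limits above), write $r_n = \lambda_n/(\lambda^*_n-\lambda_n)$. The first limit gives $r_nm^n_1 \to -1$; hence $r_n(m^n_1)^3 = (r_nm^n_1)(m^n_1)^2 \to 0$ and $r_nm^n_1m^n_3 = (r_nm^n_1)m^n_3 \to 0$, so the third limit reduces to $1/6 + m_2/2 = 0$, i.e., $m_2 = -1/3$. Likewise $r_n(m^n_1)^2 \to 0$, so the second limit gives $r_nm^n_3 \to -1-m_2$, and therefore $r_n(m^n_3)^2 = (r_nm^n_3)m^n_3 \to 0$ and $r_n(m^n_1)^2m^n_3 \to 0$. The fourth limit then collapses to $1/24 + m_2/4 + m_2^2/8 = 0$, i.e., $1/6 + m_2 + m_2^2/2 = 0$, while substituting $m_2 = -1/3$ gives
$$\frac{1}{6} - \frac{1}{3} + \frac{1}{18} = -\frac{1}{9} \neq 0,$$
which is the contradiction invoked above.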
Case 2: $\bigl[\lambda_n(|\Delta\mu_n|^2+|\Delta v_n|) + \lambda^*_n(|\Delta\mu^*_n|^2+|\Delta v^*_n|)\bigr]/\bigl[\lambda_n(|\mu_n-\mu^*_n|^2+|v_n-v^*_n|)\bigr] \to \infty$. We define
$$\overline{Q}(G_n,G_{*,n}) = (\lambda^*_n-\lambda_n)\bigl(|\Delta\mu_n|^2+|\Delta v_n|\bigr)\bigl(|\Delta\mu^*_n|^2+|\Delta v^*_n|\bigr) + \bigl[\lambda_n(|\Delta\mu_n|^2+|\Delta v_n|) + \lambda^*_n(|\Delta\mu^*_n|^2+|\Delta v^*_n|)\bigr]\bigl[|\mu_n-\mu^*_n|^2+|v_n-v^*_n|\bigr].$$
We will demonstrate that $Q(G_n,G_{*,n}) \asymp \overline{Q}(G_n,G_{*,n})$. In fact, from the above formulation of $\overline{Q}(G_n,G_{*,n})$, we have
$$\overline{Q}(G_n,G_{*,n}) \le 2(\lambda^*_n-\lambda_n)\bigl(|\Delta\mu^*_n|^2+|\Delta\mu_n-\Delta\mu^*_n|^2+|\Delta v^*_n|+|\Delta v_n-\Delta v^*_n|\bigr)\bigl(|\Delta\mu^*_n|^2+|\Delta v^*_n|\bigr) + 2\bigl[\lambda_n(|\Delta\mu_n|^2+|\Delta v_n|)+\lambda^*_n(|\Delta\mu^*_n|^2+|\Delta v^*_n|)\bigr]\bigl[|\mu_n-\mu^*_n|^2+|v_n-v^*_n|\bigr] \le 2Q(G_n,G_{*,n}),$$
where the first inequality is due to the triangle inequality together with the elementary inequality $(a+b)^2 \le 2(a^2+b^2)$, and the second inequality is due to the bound
$$(\lambda^*_n-\lambda_n)\bigl(|\Delta\mu_n-\Delta\mu^*_n|^2+|\Delta v_n-\Delta v^*_n|\bigr) \le \lambda^*_n\bigl(|\mu_n-\mu^*_n|^2+|v_n-v^*_n|\bigr).$$
On the other hand, we also have
$$2\overline{Q}(G_n,G_{*,n}) \ge (\lambda^*_n-\lambda_n)\bigl(|\Delta\mu^*_n|^2+|\Delta v^*_n|\bigr)\bigl(|\Delta\mu_n|^2+|\Delta v_n|+|\mu_n-\mu^*_n|^2+|v_n-v^*_n|\bigr) + \bigl[\lambda_n(|\Delta\mu_n|^2+|\Delta v_n|)+\lambda^*_n(|\Delta\mu^*_n|^2+|\Delta v^*_n|)\bigr]\bigl[|\mu_n-\mu^*_n|^2+|v_n-v^*_n|\bigr] \ge Q(G_n,G_{*,n})/2,$$
where the last inequality is again due to the triangle inequality and $(a+b)^2 \le 2(a^2+b^2)$. Therefore, we conclude that $Q(G_n,G_{*,n}) \asymp \overline{Q}(G_n,G_{*,n})$. Now, since $L_\beta \to 0$ for all $1 \le \beta \le 8$, we also have
$$F_\beta := \sum_{\alpha_1+2\alpha_2=\beta}\frac{(\lambda^*_n-\lambda_n)(-\Delta\mu^*_n)^{\alpha_1}(-\Delta v^*_n)^{\alpha_2} + \lambda_n(\Delta\mu_n-\Delta\mu^*_n)^{\alpha_1}(\Delta v_n-\Delta v^*_n)^{\alpha_2}}{2^{\alpha_2}\,\alpha_1!\,\alpha_2!\,\overline{Q}(G_n,G_{*,n})} \to 0.$$
Similar to Case 1, under Case 2 we also consider two distinct settings of $\lambda^*_n/\lambda_n$.

Case 2.1: $\lambda^*_n/\lambda_n \not\to \infty$. Under this case, we denote $M'_n := \max\{|\Delta\mu_n|^2, |\Delta v_n|, |\Delta\mu^*_n|^2, |\Delta v^*_n|\}$. From the assumption of Case 2, we would have
$$|\Delta\mu_n-\Delta\mu^*_n|^2/M'_n \to 0, \qquad |\Delta v_n-\Delta v^*_n|/M'_n \to 0. \qquad (18)$$
Due to the symmetry between $(|\Delta\mu_n|^2, |\Delta v_n|)$ and $(|\Delta\mu^*_n|^2, |\Delta v^*_n|)$, we assume without loss of generality that $M'_n \in \{|\Delta\mu_n|^2, |\Delta v_n|\}$. Under this assumption, we have two distinct cases.

Case 2.1.1: $M'_n = |\Delta\mu_n|^2$ for all $n$ (by the subsequence argument). From (18), we have $|\Delta\mu_n-\Delta\mu^*_n|/|\Delta\mu_n| \to 0$, i.e., $\Delta\mu_n/\Delta\mu^*_n \to 1$. To be able to utilize the assumptions of Case 2, we need to study the formulations of $F_\beta$ more closely. In fact, for $\beta = 1$ a simple calculation yields
$$A_1 := (\lambda_n\Delta\mu_n - \lambda^*_n\Delta\mu^*_n)/\overline{Q}(G_n,G_{*,n}) \to 0.$$
For $\beta = 2$, we have
$$F_2 = \frac{(\lambda^*_n-\lambda_n)(\Delta\mu^*_n)^2 + \lambda_n(\Delta\mu_n-\Delta\mu^*_n)^2 + (\lambda^*_n-\lambda_n)(-\Delta v^*_n) + \lambda_n(\Delta v_n-\Delta v^*_n)}{\overline{Q}(G_n,G_{*,n})} \to 0.$$
Combining this with the result on $A_1$, it is clear that
$$\frac{(\lambda^*_n-\lambda_n)(\Delta\mu^*_n)^2 + \lambda_n(\Delta\mu_n-\Delta\mu^*_n)^2}{\overline{Q}(G_n,G_{*,n})} - \frac{(\lambda^*_n-\lambda_n)\Delta\mu_n\Delta\mu^*_n}{\overline{Q}(G_n,G_{*,n})} \to 0.$$
Combining the above result with $F_2 \to 0$, we would have
$$A_2 := \frac{(\lambda^*_n-\lambda_n)\Delta\mu_n\Delta\mu^*_n + \lambda_n\Delta v_n - \lambda^*_n\Delta v^*_n}{\overline{Q}(G_n,G_{*,n})} \to 0.$$
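The passage from $A_1$ and $F_2$ to $A_2$ can be spelled out as follows (a worked intermediate step, using only quantities already defined). One has the exact identity
$$\lambda_n(\Delta\mu_n-\Delta\mu^*_n)^2 = (\lambda_n\Delta\mu_n - \lambda^*_n\Delta\mu^*_n)(\Delta\mu_n-\Delta\mu^*_n) + (\lambda^*_n-\lambda_n)\Delta\mu^*_n(\Delta\mu_n-\Delta\mu^*_n).$$
Since $\Delta\mu_n-\Delta\mu^*_n = \mu_n-\mu^*_n$ is bounded, $A_1 \to 0$ makes the first term on the right $o\bigl(\overline{Q}(G_n,G_{*,n})\bigr)$, while $(\lambda^*_n-\lambda_n)(\Delta\mu^*_n)^2 + (\lambda^*_n-\lambda_n)\Delta\mu^*_n(\Delta\mu_n-\Delta\mu^*_n) = (\lambda^*_n-\lambda_n)\Delta\mu_n\Delta\mu^*_n$ exactly; this yields the displayed comparison. Substituting it into $F_2 \to 0$ and using $(\lambda^*_n-\lambda_n)(-\Delta v^*_n) + \lambda_n(\Delta v_n-\Delta v^*_n) = \lambda_n\Delta v_n - \lambda^*_n\Delta v^*_n$ then gives $A_2 \to 0$.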
Now, we consider two smaller cases.

Case 2.1.1.1: $\Delta v_n/(\Delta\mu_n)^2 \to 0$ as $n\to\infty$. From (18), since $|\Delta v_n-\Delta v^*_n|/(\Delta\mu_n)^2 \to 0$, it follows that $\Delta v^*_n/(\Delta\mu_n)^2 \to 0$; since $\Delta\mu_n/\Delta\mu^*_n \to 1$, we also have $\Delta v^*_n/(\Delta\mu^*_n)^2 \to 0$. Now, from the formulation of $\overline{Q}(G_n,G_{*,n})$ we have
$$(\lambda^*_n-\lambda_n)|\Delta v_n\Delta v^*_n|/\overline{Q}(G_n,G_{*,n}) \le |\Delta v_n|/|\Delta\mu_n|^2 \to 0,$$
$$(\lambda^*_n-\lambda_n)|\Delta v_n(\Delta\mu^*_n)^2|/\overline{Q}(G_n,G_{*,n}) \le |\Delta v_n|/|\Delta\mu_n|^2 \to 0,$$
$$(\lambda^*_n-\lambda_n)|\Delta v^*_n(\Delta\mu_n)^2|/\overline{Q}(G_n,G_{*,n}) \le |\Delta v^*_n|/|\Delta\mu^*_n|^2 \to 0. \qquad (19)$$
From the result $A_2 \to 0$, multiplying $A_2$ by $\Delta\mu_n\Delta\mu^*_n$ we also obtain
$$\bigl[(\lambda^*_n-\lambda_n)(\Delta\mu_n\Delta\mu^*_n)^2 + (\lambda_n\Delta v_n - \lambda^*_n\Delta v^*_n)\Delta\mu_n\Delta\mu^*_n\bigr]/\overline{Q}(G_n,G_{*,n}) \to 0. \qquad (20)$$
As $\lambda^*_n/\lambda_n \not\to \infty$, we have two distinct settings of $\lambda^*_n/\lambda_n$.

Case 2.1.1.1.1: $\lambda^*_n/\lambda_n \not\to 1$. Using the result from (19) and the fact that $\Delta\mu_n/\Delta\mu^*_n \to 1$ (note that under this case $\lambda_n \lesssim \lambda^*_n-\lambda_n$ along the chosen subsequence), we obtain $(\lambda_n\Delta v_n - \lambda^*_n\Delta v^*_n)\Delta\mu_n\Delta\mu^*_n/\overline{Q}(G_n,G_{*,n}) \to 0$. Combining this with (20) leads to
$$(\lambda^*_n-\lambda_n)(\Delta\mu_n\Delta\mu^*_n)^2/\overline{Q}(G_n,G_{*,n}) \to 0. \qquad (21)$$
Combining (19) and (21), we achieve
$$(\lambda^*_n-\lambda_n)\bigl(|\Delta\mu_n|^2+|\Delta v_n|\bigr)\bigl(|\Delta\mu^*_n|^2+|\Delta v^*_n|\bigr)/\overline{Q}(G_n,G_{*,n}) \to 0.$$
By combining the results from (19) and (21), we can verify that

λ_n(∆µ_n)²(∆µ_n − ∆µ*_n)²/Q(G_n, G*,n) → (λ*_n − λ_n)[ −(∆µ_n)²(∆µ*_n)² + (∆µ_n)³∆µ*_n ]/Q(G_n, G*,n) → 0,
λ_n(∆µ_n)²(∆v_n − ∆v*_n)/Q(G_n, G*,n) → (λ*_n − λ_n)(∆µ_n)²∆v*_n/Q(G_n, G*,n) → 0.   (22)

Therefore, we achieve E → 0, which is a contradiction. As a consequence, Case 2.1.1.1.1 cannot happen.

Case 2.1.1.1.2: λ*_n/λ_n → 1. Under this case, if we have

max{ λ_n|∆µ_n − ∆µ*_n|²/[(λ*_n − λ_n)|∆µ_n|²], λ_n|∆v_n − ∆v*_n|/[(λ*_n − λ_n)|∆µ_n|²] } → ∞,

then we will achieve that

(λ*_n − λ_n)(∆µ_n∆µ*_n)²/Q(G_n, G*,n) ≤ min{ (λ*_n − λ_n)|∆µ*_n|²/[λ_n|∆µ_n − ∆µ*_n|²], (λ*_n − λ_n)|∆µ*_n|²/[λ_n|∆v_n − ∆v*_n|] } → 0.

From here, by using the same argument as that of Case 2.1.1.1.1, we will obtain E → 0, which is a contradiction. Therefore, we would have that

max{ λ_n|∆µ_n − ∆µ*_n|²/[(λ*_n − λ_n)|∆µ_n|²], λ_n|∆v_n − ∆v*_n|/[(λ*_n − λ_n)|∆µ_n|²] } ̸→ ∞.   (23)
With that assumption, it leads to Q(G_n, G*,n) ≍ (λ*_n − λ_n)(∆µ*_n)²(∆µ_n)² ≍ (λ*_n − λ_n)(∆µ*_n)⁴ as ∆µ*_n/∆µ_n → 1. Now, we denote ∆µ_n − ∆µ*_n = τ_1^n∆µ*_n and ∆v_n − ∆v*_n = τ_2^n(∆µ*_n)². From the assumption of Case 2.1.1.1, we would have that τ_1^n → 0 and τ_2^n → 0. By dividing both the numerator and the denominator of F_3 by (λ*_n − λ_n)(∆µ*_n)³, as the new denominator of F_3 goes to 0, we also obtain that the new numerator of this term goes to 0, i.e., the following holds:

[ −1 + λ_n(τ_1^n)³/(λ*_n − λ_n) ]/6 + λ_nτ_1^nτ_2^n/[2(λ*_n − λ_n)] → 0.

From (23), we have that λ_n(τ_1^n)²/(λ*_n − λ_n) ̸→ ∞ and λ_nτ_2^n/(λ*_n − λ_n) ̸→ ∞. Therefore, since τ_1^n → 0 and τ_2^n → 0, we would achieve that λ_n(τ_1^n)³/(λ*_n − λ_n) → 0 and λ_nτ_1^nτ_2^n/(λ*_n − λ_n) → 0. Plugging these results into the above limit implies that −1/6 = 0, which is a contradiction. As a consequence, Case 2.1.1.1.2 cannot hold.
Case 2.1.1.2: ∆v_n/(∆µ_n)² ̸→ 0 as n → ∞. Under this case, we will only consider the setting where λ*_n/λ_n → 1, as the argument for the other settings of that ratio can be carried out in a similar fashion. Since we have |∆v_n − ∆v*_n|/(∆µ_n)² → 0, it leads to ∆v*_n/(∆µ_n)² ̸→ 0. Combining with ∆µ_n/∆µ*_n → 1, it implies that when n is large enough we would have

max{ (∆µ_n)², (∆µ*_n)² } ≲ min{ |∆v_n|, |∆v*_n| }.   (24)

According to the formulation of Q(G_n, G*,n), we achieve

(λ*_n − λ_n)|∆v_n∆v*_n| / Q(G_n, G*,n) ≤ min{ (λ*_n − λ_n)|∆v*_n|/[λ_n|∆v_n − ∆v*_n|], (λ*_n − λ_n)|∆v_n|/[λ*_n|∆v_n − ∆v*_n|], (λ*_n − λ_n)|∆v*_n|/[λ_n|∆µ_n − ∆µ*_n|²], (λ*_n − λ_n)|∆v_n|/[λ*_n|∆µ_n − ∆µ*_n|²] } =: B.

If we have B → 0, we would get (λ*_n − λ_n)|∆v_n∆v*_n|/Q(G_n, G*,n) → 0. Combining with (24), we can check that all the results in (19), (21), and (22) hold. With a similar argument as in Case 2.1.1.1, we achieve Q(G_n, G*,n)/Q(G_n, G*,n) → 0, a contradiction. Therefore, we must have B ̸→ 0. This implies that when n is large enough we must have

max{λ_n, λ*_n} · max{ |∆µ_n − ∆µ*_n|², |∆v_n − ∆v*_n| } ≲ (λ*_n − λ_n) · min{ |∆v_n|, |∆v*_n| }.

Furthermore, as (λ*_n − λ_n)/λ_n → 0, we obtain |∆v*_n|/|∆v_n − ∆v*_n| → ∞ and |∆v*_n|/|∆µ_n − ∆µ*_n|² → ∞, i.e., ∆v_n/∆v*_n → 1.
With all of these results, we can check that Q(G_n, G*,n) ≲ (λ*_n − λ_n)|∆v*_n|². Denote ∆v_n − ∆v*_n = k_1^n|∆v*_n|, ∆µ_n − ∆µ*_n = k_2^n|∆v*_n|^{1/2}, and ∆µ*_n = k_3^n|∆v*_n|^{1/2} for all n. From all the assumptions we have thus far, we get k_1^n → 0, k_2^n → 0, and |k_3^n| ̸→ ∞. Additionally, as B ̸→ 0, we further have λ_n|k_1^n|/(λ*_n − λ_n) ̸→ ∞ and λ_n(k_2^n)²/(λ*_n − λ_n) ̸→ ∞. By dividing both the numerators and the denominators of F_3 and F_4 respectively by (λ*_n − λ_n)|∆v*_n|^{3/2} and (λ*_n − λ_n)|∆v*_n|², as the new denominators of F_3 and F_4 do not go to infinity, we obtain that the new numerators of these terms go to 0, i.e., the following holds:

[ −(k_3^n)³ + λ_n(k_2^n)³/(λ*_n − λ_n) ]/6 + [ k_3^n + λ_nk_1^nk_2^n/(λ*_n − λ_n) ]/2 → 0,
[ (k_3^n)⁴ + λ_n(k_2^n)⁴/(λ*_n − λ_n) ]/24 + [ −(k_3^n)² + λ_nk_1^n(k_2^n)²/(λ*_n − λ_n) ]/4 + [ 1 + λ_n(k_1^n)²/(λ*_n − λ_n) ]/8 → 0.

With the assumptions on k_1^n, k_2^n, and k_3^n, we would have

λ_n(k_2^n)^i/(λ*_n − λ_n) → 0,   λ_nk_1^n(k_2^n)^j/(λ*_n − λ_n) → 0,   λ_n(k_1^n)²/(λ*_n − λ_n) → 0

for any 3 ≤ i ≤ 4 and 1 ≤ j ≤ 2. If we denote k_3^n → k_3, then by combining all the above results we arrive at the following system of equations

−k_3³/6 + k_3/2 = 0,   k_3⁴/24 − k_3²/4 + 1/8 = 0,

which does not admit a solution, a contradiction. Hence, Case 2.1.1.2 cannot hold.

Case 2.1.2: M′_n = |∆v_n| for all n (by the subsequence argument).
From (18), we would have |∆v_n − ∆v*_n|/|∆v_n| → 0, i.e., ∆v_n/∆v*_n → 1, and |∆µ_n − ∆µ*_n|²/|∆v_n| → 0. The argument under this case is rather similar to that of Case 2.1.1; therefore, we only sketch the key steps. By using the results that A_1 → 0 and A_2 → 0, we would obtain that

[ (λ*_n − λ_n)(∆µ*_n)⁴ + λ_n(∆µ_n − ∆µ*_n)⁴ ] / [24Q(G_n, G*,n)] → (λ*_n − λ_n)∆µ_n∆µ*_n[ (∆µ_n)² − 3∆µ_n∆µ*_n + 3(∆µ*_n)² ] / [24Q(G_n, G*,n)],
[ (λ*_n − λ_n)(∆v*_n)² + λ_n(∆v_n − ∆v*_n)² ] / [8Q(G_n, G*,n)] → (λ*_n − λ_n)∆µ*_n[ ∆µ_n∆v_n − ∆µ*_n∆v_n − ∆µ_n∆v*_n ] / [8Q(G_n, G*,n)],
λ_n(∆µ_n − ∆µ*_n)²(∆v_n − ∆v*_n) / [4Q(G_n, G*,n)] → (λ*_n − λ_n)[ ∆µ_n∆µ*_n∆v*_n − ∆µ_n∆µ*_n∆v_n + ∆v_n∆v*_n ] / [4Q(G_n, G*,n)].

As F_4 → 0, we equivalently have

A_4 := (λ*_n − λ_n)[ ∆µ_n∆µ*_n( (∆µ_n)² − 3∆µ_n∆µ*_n + 3(∆µ*_n)² ) / [24Q(G_n, G*,n)] + ( ∆µ_n∆µ*_n∆v_n − 2(∆µ*_n)²∆v_n − ∆µ_n∆µ*_n∆v*_n + ∆v_n∆v*_n ) / [8Q(G_n, G*,n)] ] → 0.

Under Case 2.1.2, we only consider the setting where (∆µ_n)²/∆v_n → 0, as the other settings of this ratio can be argued in a similar fashion to that of Case 2.1.1.2. Since |∆µ_n − ∆µ*_n|²/|∆v_n| → 0, we have (∆µ*_n)²/∆v_n → 0. As ∆v_n/∆v*_n → 1, we further have that (∆µ*_n)²/∆v*_n → 0 and (∆µ_n)²/∆v*_n → 0. Therefore, we have ∆µ_n∆µ*_n/∆v_n → 0 and ∆µ_n∆µ*_n/∆v*_n → 0.
Now, from the formulation of Q(G_n, G*,n), we achieve

(λ*_n − λ_n)|∆µ_n∆µ*_n|²/Q(G_n, G*,n) ≤ |∆µ_n|²/|∆v*_n|² → 0,
(λ*_n − λ_n)|∆v_n(∆µ*_n)²|/Q(G_n, G*,n) ≤ |∆µ*_n|²/|∆v*_n| → 0,
(λ*_n − λ_n)|∆v*_n(∆µ_n)²|/Q(G_n, G*,n) ≤ |∆µ_n|²/|∆v_n| → 0,
(λ*_n − λ_n)|∆µ_n|³|∆µ*_n|/Q(G_n, G*,n) ≤ |∆µ_n||∆µ*_n|/|∆v*_n| → 0,
(λ*_n − λ_n)|∆µ_n||∆µ*_n|³/Q(G_n, G*,n) ≤ |∆µ_n||∆µ*_n|/|∆v_n| → 0.

Combining these results with A_4 → 0, we achieve (λ*_n − λ_n)∆v_n∆v*_n/Q(G_n, G*,n) → 0. From here, we can easily verify that all the results in (22) hold. Thus, by using the same argument as that of Case 2.1.1, we would get Q(G_n, G*,n)/Q(G_n, G*,n) → 0, a contradiction. As a consequence, Case 2.1.2 cannot happen.

Case 2.2: λ*_n/λ_n → ∞. Recall that M′_n = max{ |∆µ_n|², |∆v_n|, |∆µ*_n|², |∆v*_n| }. We can verify that Q(G_n, G*,n) ≲ λ*_n(M′_n)⁴. By dividing both the numerators and the denominators of A_1 and A_2 respectively by λ*_n(M′_n)^{1/2} and λ*_nM′_n, given that the new denominators go to 0, we would obtain that the new numerators also go to 0, i.e., we have the following results:

λ_n∆µ_n/[λ*_n(M′_n)^{1/2}] − ∆µ*_n/(M′_n)^{1/2} → 0,
[ (λ*_n − λ_n)∆µ_n∆µ*_n + λ_n∆v_n − λ*_n∆v*_n ] / (λ*_nM′_n) → 0.

Since λ_n/λ*_n → 0, the first limit implies that (∆µ*_n)²/M′_n → 0.
Combining this result with the second limit, we obtain ∆v*_n/M′_n → 0. Therefore, we would have M′_n = max{ |∆µ_n|², |∆v_n| }. Without loss of generality, we assume that M′_n = |∆µ_n|², as the argument for the other possibility of M′_n can be carried out in a similar fashion. With these assumptions, |∆v_n − ∆v*_n|/|∆µ_n|² ̸→ ∞, i.e., when n is large enough we get |∆v_n − ∆v*_n| ≲ |∆µ_n|². Now, we have two distinct cases.

Case 2.2.1: λ*_n max{ |∆µ*_n|², |∆v*_n| }/(λ_n|∆µ_n|²) → ∞. Due to this assumption, we can check that when n is large enough, Q(G_n, G*,n) ≍ λ*_n|∆µ_n|² max{ |∆µ*_n|², |∆v*_n| }. If max{ |∆µ*_n|², |∆v*_n| } = |∆µ*_n|² for all n, then by dividing both the numerator and denominator of A_1 by λ*_n∆µ*_n, given that the new denominator of A_1 goes to 0, its new numerator must go to 0, i.e., we have λ_n∆µ_n/(λ*_n∆µ*_n) → 1, which cannot hold since λ_n|∆µ_n|²/(λ*_n|∆µ*_n|²) → 0 (the assumption of Case 2.2.1) and |∆µ_n|/|∆µ*_n| → ∞. Therefore, we must have max{ |∆µ*_n|², |∆v*_n| } = |∆v*_n| for all n. By dividing both the numerator and denominator of A_2 by λ*_n∆v*_n, as the new denominator of A_2 goes to 0, we would have

(λ*_n − λ_n)∆µ_n∆µ*_n/(λ*_n∆v*_n) + λ_n∆v_n/(λ*_n∆v*_n) − 1 → 0.
Since λ_n|∆v_n|/(λ*_n|∆v*_n|) ≤ λ_n|∆µ_n|²/(λ*_n|∆v*_n|) → 0 and (λ*_n − λ_n)/λ*_n → 1, the above limit shows that ∆µ_n∆µ*_n/∆v*_n → 1. Since (∆µ_n)²/|∆v*_n| → ∞, it implies that (∆µ*_n)²/∆v*_n → 0. Now, by combining the results that A_1 → 0 and A_2 → 0, since F_3 → 0, we can verify that it is equivalent to

A_3 := [ (λ*_n − λ_n)∆µ_n∆µ*_n(∆µ_n − 2∆µ*_n)/3 + (λ*_n − λ_n)∆µ*_n∆v_n ] / Q(G_n, G*,n) → 0.

By dividing both the numerator and the denominator of A_3 by λ*_n∆µ_n∆v*_n, we obtain

[ (λ*_n − λ_n)∆µ_n∆µ*_n(∆µ_n − 2∆µ*_n)/3 + (λ*_n − λ_n)∆µ*_n∆v_n ] / (λ*_n∆µ_n∆v*_n) → 0.

As (∆µ*_n)²/∆v*_n → 0 and ∆µ_n∆µ*_n/∆v*_n → 1, the above limit leads to ∆µ*_n∆v_n/(∆µ_n∆v*_n) → −1/3. Now, by studying A_4 → 0 with the assumption that Q(G_n, G*,n) ≍ λ*_n|∆µ_n|²|∆v*_n|, we eventually obtain the equation 1/24 − 1/12 = 0, which is a contradiction. Therefore, Case 2.2.1 cannot hold.

Case 2.2.2: λ*_n max{ |∆µ*_n|², |∆v*_n| }/(λ_n|∆µ_n|²) ̸→ ∞. Therefore, when n is large enough, we would have λ*_n max{ |∆µ*_n|², |∆v*_n| } ≲ λ_n|∆µ_n|². Hence, under this case we achieve Q(G_n, G*,n) ≍ λ_n|∆µ_n|⁴. Denote ∆µ*_n = l_1^n∆µ_n, ∆v_n = l_2^n(∆µ_n)², and ∆v*_n = l_3^n(∆µ_n)².
From the assumptions of Case 2.2.2, we would have l_1^n → 0 and l_3^n → 0, while l_2^n ̸→ ∞. Additionally, λ*_n max{ (l_1^n)², |l_3^n| }/λ_n ̸→ 0. By dividing the numerators and denominators of A_i by λ_n(∆µ_n)^i for 1 ≤ i ≤ 3, we achieve the following system of limits:

λ*_nl_1^n/λ_n − 1 → 0,
(λ*_n − λ_n)l_1^n/λ_n + l_2^n − λ*_nl_3^n/λ_n → 0,
[(λ*_n − λ_n)/λ_n]·[ l_1^n − (l_1^n)²/3 + l_1^nl_2^n ] → 0.   (25)

As l_1^n → 0, the first limit in the above system implies that λ*_n(l_1^n)²/λ_n → 0. If we have max{ (l_1^n)², |l_3^n| } = (l_1^n)² for all n, the previous result would mean that λ*_nl_3^n/λ_n → 0. Therefore, the second limit in (25) demonstrates that l_2^n → −1. However, plugging these results into the third limit of the system would yield 1/3 − 1 = 0, which is a contradiction. Hence, we must have max{ (l_1^n)², |l_3^n| } = |l_3^n| for all n. Under this setting, by denoting λ*_nl_3^n/λ_n → a as n → ∞, the first and second limits in (25) lead to l_2^n → a − 1. With this result, the third limit of the system shows that a = 2/3. With these results, by dividing both the numerator and denominator of A_4 by λ_n(∆µ_n)⁴, we quickly arrive at the equation 1/24 − 5/72 = 0, which is a contradiction. Therefore, Case 2.2.2 cannot hold.

In sum, not all of the coefficients of ∂^{|β|}f/∂µ^β(x|µ*_n, v*_n) for 1 ≤ |β| ≤ 8 go to 0. From here, by using the same argument as that of Proposition 2 and Proposition 3, we achieve the result of part (b) of the proposition.
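As an editorial aside (not part of the source argument), the purely algebraic contradictions invoked in the case analysis above, such as the claim in Case 2.1.1.2 that the system −k_3³/6 + k_3/2 = 0 and k_3⁴/24 − k_3²/4 + 1/8 = 0 admits no common solution, can be checked mechanically. A minimal sympy sketch, with variable names chosen by us:

import sympy as sp

k = sp.symbols('k', real=True)  # plays the role of the limit k3 from Case 2.1.1.2

roots1 = sp.solveset(-k**3 / 6 + k / 2, k, domain=sp.S.Reals)
roots2 = sp.solveset(k**4 / 24 - k**2 / 4 + sp.Rational(1, 8), k, domain=sp.S.Reals)

print(roots1)                    # roots -sqrt(3), 0, sqrt(3)
print(roots2)                    # four irrational real roots
print(roots1.intersect(roots2))  # EmptySet: no common solution, as claimed

The same solve-and-intersect pattern covers the other numerical contradictions (e.g., 1/3 − 1 = 0 and 1/24 − 5/72 = 0) appearing in Cases 2.2.1 and 2.2.2.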
As a consequence, we reach the conclusion of the theorem.

B   Proofs for convergence rates and minimax lower bounds

In this appendix, we provide the proofs for the convergence rates of the MLE as well as the corresponding minimax lower bounds introduced in Section B.

B.1   Proof of Theorem 5

(a) For any G_1 = G_1(λ_1, µ_1, Σ_1) and G_2 = G_2(λ_2, µ_2, Σ_2), we denote the following distances:

d_1(G_1, G_2) = λ_1∥(µ_1, Σ_1) − (µ_2, Σ_2)∥,   d_2(G_1, G_2) = |λ_1 − λ_2|².

Even though d_2(G_1, G_2) is a proper distance, it is clear that d_1(G_1, G_2) is not symmetric and only satisfies a weak triangle inequality, i.e., we have

d_1(G_1, G_3) + d_1(G_2, G_3) ≥ min{ d_1(G_1, G_2), d_1(G_2, G_1) }.

Therefore, we will utilize the modification of the Le Cam method for nonsymmetric losses in Lemma 6.1 of [12] to deal with such a distance. We start with the following proposition.

Proposition 5. Given that f satisfies assumption (S.1) in Theorem 5, we achieve for any r < 1 that

(i) lim_{ǫ→0} inf_{G_1=(λ,µ_1,Σ_1), G_2=(λ,µ_2,Σ_2)} { h(p_{G_1}, p_{G_2})/d_1^r(G_1, G_2) : d_1(G_1, G_2) ≤ ǫ } = 0.

(ii) lim_{ǫ→0} inf_{G_1=(λ_1,µ,Σ), G_2=(λ_2,µ,Σ)} { h(p_{G_1}, p_{G_2})/d_2^r(G_1, G_2) : d_2(G_1, G_2) ≤ ǫ } = 0.

Proof. (i) For any sequences G_{1,n} = (λ_n, µ_{1,n}, Σ_{1,n}) and G_{2,n} = (λ_n, µ_{2,n}, Σ_{2,n}), we have

h²(p_{G_{1,n}}, p_{G_{2,n}}) ≤ (1/λ_n) ∫ (p_{G_{1,n}}(x) − p_{G_{2,n}}(x))² / f(x|µ_{2,n}, Σ_{2,n}) dx = λ_n ∫ (f(x|µ_{1,n}, Σ_{1,n}) − f(x|µ_{2,n}, Σ_{2,n}))² / f(x|µ_{2,n}, Σ_{2,n}) dx,

where the first inequality is due to √p_{G_{1,n}}(x) + √p_{G_{2,n}}(x) > √(λ_n f(x|µ_{2,n}, Σ_{2,n})).
By a Taylor expansion up to the first order, we have

f(x|µ_{1,n}, Σ_{1,n}) − f(x|µ_{2,n}, Σ_{2,n}) = Σ_{|α|=1} [ (µ_{1,n} − µ_{2,n})^{α_1}(Σ_{1,n} − Σ_{2,n})^{α_2} / (α_1!α_2!) ] ∂f/∂µ^{α_1}∂Σ^{α_2}(x|µ_{2,n}, Σ_{2,n}) + Σ_{|α|=1} [ (µ_{1,n} − µ_{2,n})^{α_1}(Σ_{1,n} − Σ_{2,n})^{α_2} / (α_1!α_2!) ] ∫_0^1 ∂f/∂µ^{α_1}∂Σ^{α_2}(x|µ_{2,n} + t(µ_{1,n} − µ_{2,n}), Σ_{2,n} + t(Σ_{1,n} − Σ_{2,n})) dt.

Now, by choosing λ_n^{1−2r}∥(µ_{1,n}, Σ_{1,n}) − (µ_{2,n}, Σ_{2,n})∥^{2−2r} → 0 and ∥(µ_{1,n}, Σ_{1,n}) − (µ_{2,n}, Σ_{2,n})∥ → 0 and using condition (S.1), we can easily verify that h(p_{G_{1,n}}, p_{G_{2,n}})/d_1^r(G_{1,n}, G_{2,n}) → 0. Therefore, we achieve the conclusion of part (i).

(ii) The argument for this part is essentially similar to that in part (i). In fact, for any two sequences G′_{1,n} = (λ_{1,n}, µ_n, Σ_n) and G′_{2,n} = (λ_{2,n}, µ_n, Σ_n), we also obtain

h²(p_{G′_{1,n}}, p_{G′_{2,n}}) / d_2^{2r}(G′_{1,n}, G′_{2,n}) ≤ [ (λ_{1,n} − λ_{2,n})^{2−2r} / ((1 − λ_{1,n}) ∧ λ_{1,n}) ] ∫ (h_0(x|µ_0, Σ_0) − f(x|µ_n, Σ_n))² / (h_0(x|µ_0, Σ_0) + f(x|µ_n, Σ_n)) dx ≤ 2(λ_{1,n} − λ_{2,n})^{2−2r} / ((1 − λ_{1,n}) ∧ λ_{1,n}).

By choosing (λ_{1,n} − λ_{2,n})^{2−2r}/[(1 − λ_{1,n}) ∧ λ_{1,n}] → 0, we also achieve the conclusion of part (ii).

Now, given G_* = (λ_*, µ_*, Σ_*) and r < 1, let C_0 be any fixed constant. According to part (i) of Proposition 5, for any sufficiently small ǫ > 0, there exists G′_* = (λ_*, µ*_1, Σ*_1) such that d_1(G_*, G′_*) = d_1(G′_*, G_*) = ǫ and h(p_{G_*}, p_{G′_*}) ≤ C_0ǫ^r. By means of Lemma 6.1 of [12], we achieve

inf_{Ĝ_n∈Ξ} sup_{G∈Ξ} E_{p_G}[ λ²∥(µ̂_n, Σ̂_n) − (µ, Σ)∥² ] ≥ (ǫ²/2)[ 1 − V(p^n_{G_*}, p^n_{G′_*}) ],

where p^n_{G_*} denotes the density of the n i.i.d. sample X_1, …, X_n.
From there,

V(p^n_{G_*}, p^n_{G′_*}) ≤ h(p^n_{G_*}, p^n_{G′_*}) = √( 1 − (1 − h²(p_{G_*}, p_{G′_*}))^n ) ≤ √( 1 − (1 − C_0²ǫ^{2r})^n ).

Hence, we obtain

inf_{Ĝ_n∈Ξ} sup_{G∈Ξ} E_{p_G}[ λ²∥(µ̂_n, Σ̂_n) − (µ, Σ)∥² ] ≥ (ǫ²/2)[ 1 − √( 1 − (1 − C_0²ǫ^{2r})^n ) ].

By choosing ǫ^{2r} = 1/(C_0²n), we achieve

inf_{Ĝ_n∈Ξ} sup_{G∈Ξ} E_{p_G}[ λ²∥(µ̂_n, Σ̂_n) − (µ, Σ)∥² ] ≥ c_1 n^{−1/r}

for any r < 1, where c_1 is some positive constant. Using a similar argument with the result of part (ii) in Proposition 5, we also immediately obtain

inf_{Ĝ_n∈Ξ} sup_{G∈Ξ} E_{p_G}[ |λ̂_n − λ|² ] ≥ c_2 n^{−1/r}.

As a consequence, we reach the conclusion of part (a) of the theorem.

(b) The proof of this part is a direct consequence of Theorem 2 and Theorem 1. Indeed, for Ĝ_n = (λ̂_n, µ̂_n, Σ̂_n) being the MLE as in equation (3), we have

E_{p_{G_*}}[ |λ̂_n − λ_*| + λ_*∥(µ̂_n, Σ̂_n) − (µ_*, Σ_*)∥ ] ≲ E_{p_{G_*}}V(p_{Ĝ_n}, p_{G_*}) ≤ E_{p_{G_*}}h(p_{Ĝ_n}, p_{G_*}) ≲ log n/√n,

where the first inequality follows from Theorem 2 and the last one from Theorem 1. Because all the inequalities are uniform in G_*, we achieve the conclusion of part (b) of the theorem.

B.2   Proof of Theorem 6

(a) Similar to the proof argument of part (a) of Theorem 5, we define

d_3(G_1, G_2) = λ_1∥(∆µ_1, ∆Σ_1)∥∥(µ_1, Σ_1) − (µ_2, Σ_2)∥,   d_4(G_1, G_2) = |λ_1 − λ_2|∥(∆µ_1, ∆Σ_1)∥²

for any G_1 = G_1(λ_1, µ_1, Σ_1) and G_2 = G_2(λ_2, µ_2, Σ_2). It is clear that both d_3(G_1, G_2) and d_4(G_1, G_2) still satisfy the weak triangle inequality.
To achieve the conclusion of this part, it suffices to demonstrate the following results for any r < 1:

(i) There exist two sequences G_{1,n} = (λ_n, µ_{1,n}, Σ_{1,n}) ∈ Ξ_1(l_n) and G_{2,n} = (λ_n, µ_{2,n}, Σ_{2,n}) ∈ Ξ_1(l_n) such that d_3(G_{1,n}, G_{2,n}) → 0 and h(p_{G_{1,n}}, p_{G_{2,n}})/d_3^r(G_{1,n}, G_{2,n}) → 0 as n → ∞.

(ii) There exist two sequences G′_{1,n} = (λ_{1,n}, µ_n, Σ_n) ∈ Ξ_1(l_n) and G′_{2,n} = (λ_{2,n}, µ_n, Σ_n) ∈ Ξ_1(l_n) such that d_4(G′_{1,n}, G′_{2,n}) → 0 and h(p_{G′_{1,n}}, p_{G′_{2,n}})/d_4^r(G′_{1,n}, G′_{2,n}) → 0 as n → ∞.

The proof argument for the above results proceeds in a similar fashion to that of Proposition 5; therefore, it is omitted. We achieve the conclusion of part (a) of the theorem.

(b) Combining the result of Theorem 3 and the fact that D(G, G_*) ≍ D(G, G_*) for any G and G_*, we immediately achieve the following convergence rates:

sup_{G_*∈Ξ} E_{p_{G_*}}[ (λ_*)²∥(∆µ_*, ∆Σ_*)∥²∥(µ̂_n, Σ̂_n) − (µ_*, Σ_*)∥² ] ≲ log²n/n,
sup_{G_*∈Ξ} E_{p_{G_*}}[ ∥(∆µ̂_n, ∆Σ̂_n)∥²∥(∆µ_*, ∆Σ_*)∥²|λ̂_n − λ_*|² ] ≲ log²n/n.   (26)

It is clear that the second result in (26) does not match the second result in the conclusion of part (b) of the theorem. To circumvent this issue, we utilize the fact that G_* ∈ Ξ_1(l_n). Indeed, noticing that (µ̂_n, Σ̂_n) − (µ_*, Σ_*) = (∆µ̂_n, ∆Σ̂_n) − (∆µ_*, ∆Σ_*), we have

sup_{G_*∈Ξ} E_{p_{G_*}}[ ∥(∆µ̂_n, ∆Σ̂_n) − (∆µ_*, ∆Σ_*)∥² ] / ∥(∆µ_*, ∆Σ_*)∥² ≲ log²n / [ n(λ_*)²∥(∆µ_*, ∆Σ_*)∥⁴ ] → 0.   (27)

Hence, by the AM–GM inequality, we have

E_{p_{G_*}}∥(∆µ̂_n, ∆Σ̂_n)∥²(λ̂_n − λ_*)² ≥ (1/2)∥(∆µ_*, ∆Σ_*)∥²E_{p_{G_*}}(λ̂_n − λ_*)² − E_{p_{G_*}}∥(∆µ̂_n, ∆Σ̂_n) − (∆µ_*, ∆Σ_*)∥²(λ̂_n − λ_*)²
= (1/2)∥(∆µ_*, ∆Σ_*)∥²[ E_{p_{G_*}}(λ̂_n − λ_*)² − 2E_{p_{G_*}}∥(∆µ̂_n, ∆Σ̂_n) − (∆µ_*, ∆Σ_*)∥²(λ̂_n − λ_*)² / ∥(∆µ_*, ∆Σ_*)∥² ]
≳ ∥(∆µ_*, ∆Σ_*)∥²E_{p_{G_*}}(λ̂_n − λ_*)²   (28)

uniformly in G_*, where in the last inequality we use (27) combined with the fact that |λ̂_n − λ_*| is uniformly bounded by 2. Hence,

E_{p_{G_*}}[ ∥(∆µ_*, ∆Σ_*)∥⁴|λ̂_n − λ_*|² ] ≲ E_{p_{G_*}}[ ∥(∆µ̂_n, ∆Σ̂_n)∥²∥(∆µ_*, ∆Σ_*)∥²|λ̂_n − λ_*|² ] ≲ log²(n)/n,

which is the conclusion of the theorem.
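For completeness, a short worked note on the step labelled AM–GM in (28) above (this reformulation is ours and is not taken verbatim from the source): for any two vectors a and b of the same dimension,

\[
\|b\|^2 = \|a + (b-a)\|^2 \le 2\|a\|^2 + 2\|a-b\|^2
\quad\Longrightarrow\quad
\|a\|^2 \ge \tfrac{1}{2}\|b\|^2 - \|a-b\|^2 .
\]

Taking a = (∆µ̂_n, ∆Σ̂_n) and b = (∆µ_*, ∆Σ_*), multiplying both sides by (λ̂_n − λ_*)², and then taking expectations yields exactly the first inequality in (28).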
B.3   Proof of Theorem 7

(a) Similar to the proof argument of part (a) of Theorem 5, we define

d_5(G_1, G_2) = λ_1∥(µ_1, Σ_1) − (µ_2, Σ_2)∥⁴,   d_6(G_1, G_2) = |λ_1 − λ_2|∥(∆µ_1, ∆Σ_1)∥⁴

for any G_1 = G_1(λ_1, µ_1, Σ_1) and G_2 = G_2(λ_2, µ_2, Σ_2). It is clear that d_6(G_1, G_2) satisfies the weak triangle inequality, while d_5(G_1, G_2) no longer does. In particular, we have

d_5(G_1, G_3) + d_5(G_2, G_3) ≥ min{ d_5(G_1, G_2), d_5(G_2, G_1) }/8.

A close investigation of Lemma 6.1 of [12] reveals that the modified Le Cam method still works under this setting of the d_5 metric. More specifically, for any ǫ > 0 the following holds:

inf_{Ĝ_n∈Ξ} sup_{G∈Ξ_2(l_n)} E_{p_G}[ d_5²(G, Ĝ_n) ] ≥ (ǫ²/128)[ 1 − V(p^n_{G_1}, p^n_{G_2}) ],

where G_1, G_2 ∈ Ξ_2(l_n) are such that d_5(G_1, G_2) ∧ d_5(G_2, G_1) ≥ ǫ/4. From here, to achieve the conclusion of part (a), it suffices to demonstrate for any r < 1 that:

(i) There exist two sequences G_{1,n} = (λ_n, µ_{1,n}, Σ_{1,n}) ∈ Ξ_2(l_n) and G_{2,n} = (λ_n, µ_{2,n}, Σ_{2,n}) ∈ Ξ_2(l_n) such that d_5(G_{1,n}, G_{2,n}) → 0 and h(p_{G_{1,n}}, p_{G_{2,n}})/d_5^r(G_{1,n}, G_{2,n}) → 0 as n → ∞.

(ii) There exist two sequences G′_{1,n} = (λ_{1,n}, µ_n, Σ_n) ∈ Ξ_2(l_n) and G′_{2,n} = (λ_{2,n}, µ_n, Σ_n) ∈ Ξ_2(l_n) such that d_6(G′_{1,n}, G′_{2,n}) → 0 and h(p_{G′_{1,n}}, p_{G′_{2,n}})/d_6^r(G′_{1,n}, G′_{2,n}) → 0 as n → ∞.

Following the proof argument of Proposition 5, we can quickly verify the above results. As a consequence, we reach the conclusion of part (a) of the theorem.

(b) From the discussion after Theorem 3, we can show that

Q(G, G_*) ≍ |λ − λ_*|(∥∆µ∥²∥∆Σ∥)(∥∆µ_*∥²∥∆Σ_*∥) + (∥µ − µ_*∥² + ∥Σ − Σ_*∥)( λ(∥∆µ∥² + ∥∆Σ∥) + λ_*(∥∆µ_*∥² + ∥∆Σ_*∥) ).
Hence, from Theorem 4 combined with Theorem 1, we have

sup_{G_*} E_{p_{G_*}}[ (λ_*)²(∥µ̂_n − µ_*∥⁴ + ∥Σ̂_n − Σ_*∥²)(∥∆µ_*∥⁴ + ∥∆Σ_*∥²) ] ≲ log²(n)/n,
sup_{G_*} E_{p_{G_*}}[ |λ̂_n − λ_*|²(∥∆µ̂_n∥⁴∥∆Σ̂_n∥²)(∥∆µ_*∥⁴∥∆Σ_*∥²) ] ≲ log²(n)/n.

Similar to the proof of Theorem 6, and with the definition of Ξ_2(l_n), we have

E_{p_{G_*}}[ |λ̂_n − λ_*|²(∥∆µ̂_n∥⁴∥∆Σ̂_n∥²) ] ≳ (∥∆µ_*∥⁴∥∆Σ_*∥²) E_{p_{G_*}}|λ̂_n − λ_*|²

uniformly in G_* ∈ Ξ_2(l_n). Hence,

sup_{G_*∈Ξ_2(l_n)} E_{p_{G_*}}[ |λ̂_n − λ_*|²(∥∆µ_*∥⁸∥∆Σ_*∥⁴) ] ≲ log²(n)/n.

As a consequence, we obtain the conclusion of the theorem.

C   Proofs for auxiliary results

Lemma 1. For any r ≥ 1, we define

D_r(G, G_*) = λ∥(∆µ, ∆Σ)∥^r + λ_*∥(∆µ_*, ∆Σ_*)∥^r − min{λ, λ_*}[ ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r − ∥(µ, Σ) − (µ_*, Σ_*)∥^r ]

for any G and G_*. Then, we have W_r^r(G, G_*) ≍ D_r(G, G_*) for any r ≥ 1, where W_r is the r-th order Wasserstein distance.

Proof. Without loss of generality, we assume throughout the lemma that λ < λ_*. Therefore, we obtain from the formulation of D_r(G, G_*) that

D_r(G, G_*) = (λ_* − λ)∥(∆µ_*, ∆Σ_*)∥^r + λ∥(µ, Σ) − (µ_*, Σ_*)∥^r.

Direct computation of W_r^r(G, G_*) yields three distinct cases.

Case 1: If ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r ≥ ∥(µ, Σ) − (µ_*, Σ_*)∥^r, then

W_r^r(G, G_*) = λ∥(∆µ, ∆Σ)∥^r + λ_*∥(∆µ_*, ∆Σ_*)∥^r − min{λ, λ_*}( ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r − ∥(µ, Σ) − (µ_*, Σ_*)∥^r ) = D_r(G, G_*).

Case 2: If ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r < ∥(µ, Σ) − (µ_*, Σ_*)∥^r and λ + λ_* ≤ 1, then

W_r^r(G, G_*) = λ∥(∆µ, ∆Σ)∥^r + λ_*∥(∆µ_*, ∆Σ_*)∥^r = (λ_* − λ)∥(∆µ_*, ∆Σ_*)∥^r + λ( ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r ).

From the Cauchy–Schwarz inequality, we have ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r ≳ ∥(µ, Σ) − (µ_*, Σ_*)∥^r.
Therefore, under Case 2 we have ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r ≍ ∥(µ, Σ) − (µ_*, Σ_*)∥^r, which directly implies that W_r^r(G, G_*) ≍ D_r(G, G_*).

Case 3: If ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r < ∥(µ, Σ) − (µ_*, Σ_*)∥^r and λ + λ_* > 1, then

W_r^r(G, G_*) = (1 − λ_*)∥(∆µ, ∆Σ)∥^r + (1 − λ)∥(∆µ_*, ∆Σ_*)∥^r + (λ + λ_* − 1)∥(µ, Σ) − (µ_*, Σ_*)∥^r = (λ_* − λ)∥(∆µ_*, ∆Σ_*)∥^r + (1 − λ_*)( ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r ) + (λ_* + λ − 1)∥(µ, Σ) − (µ_*, Σ_*)∥^r.

Since ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r ≍ ∥(µ, Σ) − (µ_*, Σ_*)∥^r, we achieve

(1 − λ_*)( ∥(∆µ, ∆Σ)∥^r + ∥(∆µ_*, ∆Σ_*)∥^r ) ≍ (1 − λ_*)∥(µ, Σ) − (µ_*, Σ_*)∥^r.

Therefore, we also have W_r^r(G, G_*) ≍ D_r(G, G_*) under Case 3. Combining the results from these cases, we reach the conclusion of the lemma.

diff --git a/FdAyT4oBgHgl3EQfrPl7/vector_store/index.faiss b/FdAyT4oBgHgl3EQfrPl7/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..a0839592469e9ddf484211c256688989d881298c --- /dev/null +++ b/FdAyT4oBgHgl3EQfrPl7/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c19b4fec2bcca257d4d2f0233f9ffef5007d5407337f71fbaa1a9ea877dc3c3 +size 6094893 diff --git a/GNAyT4oBgHgl3EQfrfkX/vector_store/index.faiss b/GNAyT4oBgHgl3EQfrfkX/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..0617e23eaac7e46af19cd9978f7c11ab1b62cd96 --- /dev/null +++ b/GNAyT4oBgHgl3EQfrfkX/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:354076ef899ad8e1d77c756985a3f7025f0f11c951bf65f233cfc8c0a1d42103 +size 1966125 diff --git a/GdA0T4oBgHgl3EQfBf_u/vector_store/index.faiss b/GdA0T4oBgHgl3EQfBf_u/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..d5a09da9f81470e4a5fbde3b106c4f957d8a6aac --- /dev/null +++ b/GdA0T4oBgHgl3EQfBf_u/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e831a1275f70b3b93413da0da7aa98ba12dc9a98147050f27c4273f5a056374 +size 393261 diff --git a/GdAyT4oBgHgl3EQfrflY/content/2301.00561v1.pdf b/GdAyT4oBgHgl3EQfrflY/content/2301.00561v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a2f45a0935b8b9e302ce22fc6a766baf7de474d5 --- /dev/null +++ b/GdAyT4oBgHgl3EQfrflY/content/2301.00561v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:025a0ef5d736ac0c0d7ee58e9b19caeb63d3570f77c384eb2d201fdaf00d7453 +size 164247 diff --git a/GdAyT4oBgHgl3EQfrflY/vector_store/index.faiss b/GdAyT4oBgHgl3EQfrflY/vector_store/index.faiss new file mode 100644 index
0000000000000000000000000000000000000000..beb1162cb85c72a8097ea7d155f1f5c31bb46cd2 --- /dev/null +++ b/GdAyT4oBgHgl3EQfrflY/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7f8d536b4882104de5fb275cdab8f06e81bb60883257714fccc43a3930af124 +size 2687021 diff --git a/GdAyT4oBgHgl3EQfrflY/vector_store/index.pkl b/GdAyT4oBgHgl3EQfrflY/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..c0db4a34e769a8f6e926e00c0a599f40ac82f854 --- /dev/null +++ b/GdAyT4oBgHgl3EQfrflY/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13302bd243615a952202282018eae46269c6a06d5367ffe2064f4de8f30e25e0 +size 101498 diff --git a/GtE1T4oBgHgl3EQfXQSk/content/tmp_files/2301.03125v1.pdf.txt b/GtE1T4oBgHgl3EQfXQSk/content/tmp_files/2301.03125v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..5eeb30b1286182ec69e2f5aea5dc54c1aed7098a --- /dev/null +++ b/GtE1T4oBgHgl3EQfXQSk/content/tmp_files/2301.03125v1.pdf.txt @@ -0,0 +1,4859 @@

arXiv:2301.03125v1 [stat.ML] 9 Jan 2023

Sharper Analysis for Minibatch Stochastic Proximal Point Methods: Stability, Smoothness, and Deviation

Xiao-Tong Yuan and Ping Li
Cognitive Computing Lab, Baidu Research
No. 10 Xibeiwang East Road, Beijing 100193, China
10900 NE 8th St., Bellevue, Washington 98004, USA
E-mail: {xtyuan1980, pingli98}@gmail.com

Abstract

The stochastic proximal point (SPP) methods have gained recent attention for stochastic optimization, with strong convergence guarantees and superior robustness over the classic stochastic gradient descent (SGD) methods showcased at little to no cost of added computational overhead. In this article, we study a minibatch variant of SPP, namely M-SPP, for solving convex composite risk minimization problems. The core contribution is a set of novel excess risk bounds of M-SPP derived through the lens of algorithmic stability theory. Particularly, under smoothness and quadratic growth conditions, we show that M-SPP with minibatch size n and iteration count T enjoys an in-expectation fast rate of convergence consisting of an O(1/T²) bias decaying term and an O(1/(nT)) variance decaying term. In the small-n-large-T setting, this result substantially improves the best known results of SPP-type approaches by revealing the impact of the noise level of the model on the convergence rate. In the complementary small-T-large-n regime, we provide a two-phase extension of M-SPP to achieve comparable convergence rates. Moreover, we derive a near-tight high probability (over the randomness of data) bound on the parameter estimation error of a sampling-without-replacement variant of M-SPP. Numerical evidence is provided to support our theoretical predictions when substantialized to Lasso and logistic regression models.
We are particularly interested in the +situation where the composite population risk R is strongly convex around its minimizers, though +in this setting the terms Rℓ and r are not necessarily required to be so simultaneously. For an +instance, the ℓ1-norm regularizer r(w) = µ∥w∥1 or its grouped variants are often used for sparse +generalized linear models learning with quadratic or logistic loss functions (Van de Geer, 2008; +Ravikumar et al., 2009; Negahban et al., 2012). +In statistical machine learning, it is usually assumed that the estimator only has access to, +either as a batch training set or in an online/incremental manner, a collection S = {zi}N +i=1 of i.i.d. +random data instances drawn from D. The goal is to compute a stochastic estimator ˆwS based on +the knowledge of S, hopefully that it generalizes well as a near minimizer of the population risk. +More precisely, we aim at deriving a suitable law of large numbers, i.e., a sample size vanishing rate +δN so that the excess risk at ˆwS satisfies R( ˆwS) − R∗ ≤ δN in expectation or with high probability +over S where R∗ := minw∈W R(w) represents the minimal value of composite risk. +In this work, inspired by the recent remarkable success of the stochastic proximal point (SPP) +algorithms (Patrascu and Necoara, 2017; Asi and Duchi, 2019a,b; Davis and Drusvyatskiy, 2019) +and their minibatch extensions (Wang et al., 2017b; Zhou et al., 2019; Asi et al., 2020), we provide +a sharper generalization performance analysis for a class of minibatch SPP methods for solving the +stochastic composite risk minimization problem (1). +1.1 +Algorithm and Motivation of Study +Minibatch Stochastic Proximal Point Algorithm. +Let St = {zi,t}n +i=1 be a minibatch of n i.i.d. +samples drawn from distribution D at time instance t ≥ 1 and denote +RSt(w) := 1 +n +n +� +i=1 +ℓ(w; zi,t) + r(w) +as the regularized empirical risk over St. We consider the Minibatch Stochastic Proximal Point (M- +SPP) algorithm, as outlined in Algorithm 1, for composite risk minimization based on a sequence of +data minibatches S = {St}T +t=1. The precision value ǫt in the algorithm quantifies the sub-optimality +of wt for solving the inner-loop regularized ERM over the minibatch St. The M-SPP algorithm +is generic and it encompasses several existing SPP methods as special cases. For example in the +2 + +Algorithm 1: Minibatch Stochastic Proximal Point (M-SPP) +Input +: Regularization modulus {γt}t≥1. +Output: ¯wT as a weighted average of {wt}1≤t≤T . +Initialization Specify a value of w0. Typically w0 = 0. +for t = 1, 2, ..., T do +Sample a minibatch St := {zi,t}n +i=1 +i.i.d. +∼ Dn and estimate wt satisfying +Ft(wt) ≤ min +w∈W +� +Ft(w) := RSt(w) + γt +2 ∥w − wt−1∥2� ++ ǫt, +(2) +where RSt(w) := 1 +n +�n +i=1 ℓ(w; zi,t) + r(w) and ǫt ≥ 0 measures the sub-optimality of +estimation. +end +extreme case when n = 1 and ǫt ≡ 0 M-SPP reduces to a composite variant of the standard SPP +method (Bertsekas, 2011), as formulated in (5). In general, the recursion update formulation (2) +can be regarded as a natural composite extension of the existing minibatch stochastic proximal +point methods for statistical estimation (Wang et al., 2017b; Asi et al., 2020). +Prior results and limitations. The present study focuses on the generalization analysis of M-SPP +for convex composite risk optimization. Recently, it has been shown by Asi et al. 
(2020, Theorem +2) that if the instantaneous loss functions are strongly convex with respect to the parameters, then +the M-SPP algorithm converges at the rate of O +� +log(nT) +nT +� +. +Prior to that, Wang et al. (2017b, +Theorem 5) proved an O( 1 +nT ) rate for M-SPP when the individual loss functions are Lipschitz +continuous and strongly convex. +There results, among others for SPP (Patrascu and Necoara, +2017; Davis and Drusvyatskiy, 2019), commonly require that each instantaneous loss should be +strongly convex which is too stringent to be fulfilled in high-dimensional or infinite spaces. For +an instance, the quadratic loss ℓ(w; z) = +1 +2(w⊤x − y)2 over a feature-label pair z = (x, y) is +convex but in general not strongly convex, although the population risk Rℓ(w) = 1 +2E(y − w⊤x)2 is +strongly convex provided that the covariance matrix of random feature x is non-degenerate. In the +meanwhile, the Lipschitz-loss assumption made for the analysis (Wang et al., 2017b, Theorem 5) +limits its applicability to smooth losses like quadratic loss, not to mention an interaction between +Lipschitz continuity and strong convexity (Agarwal et al., 2012; Asi and Duchi, 2019b). +The above mentioned deficiencies of prior results motivate us to investigate the convergence be- +havior of M-SPP for composite risk minimization beyond the setting where each individual loss is +strongly convex and Lipschitz continuous. From the perspective of optimization, smoothness is es- +sential for establishing strong convergence guarantees for solving the inner-loop strongly convex risk +minimization subproblems in (6), e.g., with variance reduced stochastic algorithms (Johnson and Zhang, +2013; Xiao and Zhang, 2014) or communication-efficient distributed optimization algorithms (Shamir et al., +2014; Zhang and Lin, 2015; Yuan and Li, 2020). Aiming at covering such an important yet less un- +3 + +derstood problem regime, we focus our study on analyzing the convergence behavior of M-SPP when +the convex loss functions are smooth and the risk function exhibits quadratic growth property (see +Assumption 2 for a formal definition). +1.2 +Our Contributions and Main Results +The main contribution of the present work is a sharper non-asymptotic convergence analysis of the +M-SPP algorithm through the lens of algorithmic stability theory (Bousquet and Elisseeff, 2002; +Feldman and Vondr´ak, 2018). Let W ∗ := {w ∈ W : R(w) = R∗} be the set of minimizers of the +composite population risk R. We are particularly interested in the regime where the loss function +ℓ is convex and smooth but not necessarily Lipschitz (e.g., quadratic loss), while the population +risk R satisfies the quadratic growth condition, i.e., R(w) − R∗ ≥ λ +2 minw∗∈W ∗ ∥w − w∗∥2, ∀w ∈ W, +for some λ > 0, which can be satisfied by strongly convex objectives, and various other statistical +estimation problems (see, e.g., Karimi et al., 2016; Drusvyatskiy and Lewis, 2018). For the family +of L-smooth loss functions, with γt = O(λρt) for an arbitrary scalar ρ ∈ (0, 0.5] and ǫt ≡ 0, we +show in Theorem 1 that the excess risk at the weighted average output ¯wT = +2 +T(T+1) +�T +t=1 twt is in +expectation upper bounded by the following bound: +R( ¯wT ) − R∗ ≲ ρ [R(w0) − R∗] +T 2 ++ LR∗ +ρλnT . +(3) +In this composite bound, the first bias component associated with initial gap R(w0) − R∗ has a +decaying rate O +� 1 +T 2 +� +and the second variance component associated with R∗ converges at the +rate of O +� +1 +λnT +� +. 
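Before discussing this bound further, here is a minimal sketch of how Algorithm 1 produces the weighted average output appearing in (3), with gamma_t proportional to lambda*rho*t (the constant 1/4 follows the choice made in Theorem 1). The inner routine is a placeholder for any approximate minimizer of F_t; the closed-form ridge solver used in the demo (squared loss, r = 0, standard Gaussian design) and all problem sizes are illustrative assumptions rather than the paper's implementation.

    import numpy as np

    def m_spp(minibatches, solve_prox_subproblem, lam, rho, w0):
        """Outer loop of M-SPP (Algorithm 1) with gamma_t = lam*rho*t/4.

        solve_prox_subproblem(batch, gamma, w_prev) should return an
        (approximate) minimizer of R_{S_t}(w) + gamma/2*||w - w_prev||^2.
        Returns w_bar_T = 2/(T*(T+1)) * sum_t t*w_t.
        """
        w = w0.copy()
        acc = np.zeros_like(w0)
        T = 0
        for t, batch in enumerate(minibatches, start=1):
            gamma_t = lam * rho * t / 4.0
            w = solve_prox_subproblem(batch, gamma_t, w)
            acc += t * w
            T = t
        return 2.0 * acc / (T * (T + 1))

    def ridge_prox(batch, gamma, w_prev):
        # exact inner minimizer for the squared loss with r = 0:
        #   (1/2n)*||Y - X w||^2 + gamma/2*||w - w_prev||^2
        X, Y = batch
        n, p = X.shape
        return np.linalg.solve(X.T @ X / n + gamma * np.eye(p),
                               X.T @ Y / n + gamma * w_prev)

    if __name__ == "__main__":
        rng = np.random.default_rng(0)
        p, n, T, lam, rho = 20, 100, 50, 1.0, 0.5   # illustrative constants
        w_true = rng.normal(size=p)
        batches = []
        for _ in range(T):
            X = rng.normal(size=(n, p))
            batches.append((X, X @ w_true + 0.1 * rng.normal(size=n)))
        w_bar = m_spp(batches, ridge_prox, lam, rho, np.zeros(p))
        print("||w_bar_T - w_true|| =", np.linalg.norm(w_bar - w_true))

Substituting any inexact inner routine for ridge_prox corresponds to running (2) with a nonzero sub-optimality epsilon_t.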
The variance decaying rate actually matches the corresponding optimal rates of +the SGD-type methods for strongly convex optimization (Rakhlin et al., 2012; Dieuleveut et al., +2017; Woodworth and Srebro, 2021). Also, such an O +� 1 +T 2 + +1 +λnT +� +bounds matches those bounds +for SPP (Davis and Drusvyatskiy, 2019) or M-SPP (Wang et al., 2017b) which are in contrast +obtained under a substantially stronger assumption that each individual loss function should be +strongly convex and Lipschitz as well. In the realizable or near realizable machine learning regimes +where R∗ equals to or approximates zero, the variance term in (3) would be sharper than those +bounds of Wang et al. (2017b); Davis and Drusvyatskiy (2019). To our best knowledge, the bound +in (3) for smooth and convex loss functions is new to the SPP-type methods. More generally for +arbitrary convex risk functions, we present in Theorem 3 an O( +1 +√ +nT ) excess risk bound for exact +M-SPP. Further, as shown in Theorem 4 and Theorem 5, similar results can be extended to the +inexact M-SPP given that the inner-loop sub-optimality is sufficiently small. +In the regime T ≪ n which is of special interest for off-line incremental learning with large data +batches, setting a near-optimal value ρ = +� +T +nλ in the excess risk bound (3) yields an O +� +1 +T +√ +λnT +� +rate of convergence. This rate, in terms of n, is substantially slower than the O( +1 +λnT ) rate available +for the previous small-n-large-T setup. In order to address such a deficiency, we propose a two- +phase variant of M-SSP (see Algorithm 2) to boost its performance in the small-T-large-n regime: +4 + +in the first phase, M-SPP with sufficiently small minibatch-size is invoked over S1 to obtain w1, and +then initialized by w1 the second phase applies M-SPP to the rest minibatches. Then in Theorem 2 +we show that the in-expectation excess risk at the output of the second phase can be accelerated +to scale as +R( ¯wT ) − R∗ ≲ L2(R(w0) − R∗) +λ2n2T 2 ++ LR∗ +λnT , +(4) +which holds regardless to the mutual strength of minibatch size n and iteration count T. +In addition to the above in-expectation risk bounds, we further derive a high-probability model +estimation error bound of M-SPP based on algorithmic stability theory. Our deviation analysis is +carried out over a sampling-without-replacement variant of M-SPP (see Algorithm 3). For popula- +tion risk with quadratic growth property, up to an additive term on the inner-loop sub-optimality +ǫt, we establish in Theorem 6 the following deviation bound on the estimation error D( ¯wT , W ∗) that +holds with probability at least 1 − δ over S while in expectation over the randomness of sampling: +D( ¯wT , W ∗) ≲ +� +L log(1/δ) log(T) +λ +√ +nT ++ +� +[R(w0) − R∗] +λT 2 ++ +LR∗ +ρλ2nT . +When T = Ω(n), up to the logarithmic factors, this above bound matches (in terms of the total +sample size N = nT) the known minimax lower bounds for statistical estimation even without +computational limits (Tsybakov, 2008). +To highlight the core contribution of this work, the following three new insights into M-SPP +make our results distinguished from the best known of SPP-type methods for convex optimization: +1. First and for most, the fast rates in (3) and (4) reveal the impact of noise level, as quanti- +fied by R∗, to convergence rate which has not been previously known for SPP-type methods. 
+These bounds are valid for smooth losses and thus complement the previous ones for Lipschitz +losses (Patrascu and Necoara, 2017; Wang et al., 2017b; Davis and Drusvyatskiy, 2019). +2. Second, the risk bounds in (3) and (4) are established under the quadratic growth condition of +population risk. This is substantially weaker than the instantaneous-loss-wise strong convexity +assumption commonly imposed by prior analysis to achieve the comparable rates for SPP-type +methods (Toulis and Airoldi, 2017; Wang et al., 2017b; Asi et al., 2020). +3. Third, we provide a deviation analysis of M-SPP from the viewpoint of uniform algorithmic +stability which to our best knowledge has not yet been addressed in the previous study on +SPP-type methods. +We should emphasize that, while we provide some insights into the numerical aspects of M-SPP +through an empirical study, this work is largely a theoretical contribution. +5 + +1.3 +Related Work +Our work is situated at the intersection of two lines of machine learning research: stochastic +optimization and algorithmic stability theory, both of which have been actively studied with a vast +body of beautiful and insightful theoretical results established in literature. We next incompletely +review some representative work that are closely relevant to ours. +Stochastic optimization. Stemming from the pioneering work of Robbins and Monro (1951), +stochastic gradient descent (SGD) methods have been extensively studied to approximately solve a +simplified version of the problem (1) with r ≡ 0 (Zhang, 2004; Nemirovski et al., 2009; Rakhlin et al., +2012; Bottou et al., 2018). For the composite formulation, a vast body of proximal SGD methods +have been developed for efficient optimization in the presence of potentially non-smooth regulariz- +ers (Hu et al., 2009; Duchi et al., 2010; Ghadimi and Lan, 2012; Lan, 2012; Kulunchakov and Mairal, +2019). +To handle the challenges associated with stepsize selection and numerical instability of +SGD (Nemirovski et al., 2009; Bach and Moulines, 2011), a number of more sophisticated meth- +ods including implicit stochastic/online learning (Crammer et al., 2006; Kulis and Bartlett, 2010; +Toulis et al., 2016; Toulis and Airoldi, 2017) and stochastic proximal point (SPP) methods (Bertsekas, +2011; Patrascu and Necoara, 2017; Asi and Duchi, 2019a,b; Davis and Drusvyatskiy, 2019) have re- +cently been investigated for enhancing stability and adaptivity of stochastic (composite) optimiza- +tion. For an example, in our considered composite optimization regime, the iteration procedure of +vanilla SPP can be expressed as the following recursion form for i ≥ 1: +ˆwspp +i +:= arg min +w∈W +ℓ(w; zi) + r(w) + γi +2 ∥w − ˆwspp +i−1∥2, +(5) +where zi ∼ D is a random data sample, γi is a regularization modulus and ∥ · ∥ stands for the +Euclidean norm. In contrast to standard SGD methods which are simple in per-iteration modeling +but brittle to stepsize choice, the SPP methods are more accurate in objective approximation which +leads to substantially improved stability to the choice of algorithm hyper-parameters while enjoying +optimal guarantees on convergence (Asi and Duchi, 2019a,b). +An attractive feature of these above (proximal) stochastic optimization methods is that their +convergence guarantees directly apply to the population risk and the minimax optimal rates of order +O( 1 +T ) are achievable after T rounds of iteration for strongly convex problems (Nemirovski et al., +2009; Agarwal et al., 2012; Rakhlin et al., 2012). 
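Returning to the recursion (5) for intuition: in the special case of the squared loss ell(w; (x, y)) = (1/2)(w'x - y)^2 with r = 0 and W the whole space, the proximal update has a closed form by the Sherman-Morrison identity, and the iterate moves toward, but never past, the interpolation point w'x = y for any gamma_i > 0, which is one concrete way to see the insensitivity to the choice of gamma_i. The sketch below is an illustration under these special assumptions only; the names and the gamma schedule in the demo are our own.

    import numpy as np

    def spp_step_squared_loss(w_prev, x, y, gamma):
        """One vanilla SPP update (5) for ell(w; (x, y)) = 0.5*(w'x - y)^2, r = 0.

        Minimizes 0.5*(w'x - y)^2 + gamma/2*||w - w_prev||^2; the closed-form
        solution (Sherman-Morrison) is
            w = w_prev + (y - x'w_prev) / (gamma + ||x||^2) * x.
        """
        residual = y - x @ w_prev
        return w_prev + residual / (gamma + x @ x) * x

    if __name__ == "__main__":
        rng = np.random.default_rng(1)
        p = 10
        w_bar = rng.normal(size=p)
        w = np.zeros(p)
        for i in range(1, 2001):
            x = rng.normal(size=p)
            y = w_bar @ x + 0.01 * rng.normal()
            # an increasing modulus gamma_i, as used later for strongly convex risks
            w = spp_step_squared_loss(w, x, y, gamma=0.1 * i)
        print("estimation error:", np.linalg.norm(w - w_bar))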
For large-scale machine learning, the improved +memory efficiency is another practical argument in favor of stochastic over batch optimization +methods. However, due to the sequential processing nature, the stochastic optimization methods +tend to be less efficient for parallelization especially in distributed computing environment where +excessive communication between nodes would be required for model update (Bottou et al., 2018). +Empirical risk minimization. At the opposite end of SGD-type and online learning, the following +defined (composite) empirical risk minimization (ERM, a.k.a., M-estimation) is another popularly +6 + +studied formulation for statistical learning (Lehmann and Casella, 2006): +ˆwerm +S +:= arg min +w∈W +� +RS(w) := 1 +N +N +� +i=1 +ℓ(w; zi) + r(w) +� +. +Thanks to the finite-sum structure, a large body of randomized incremental algorithms with lin- +ear rates of convergence have been established for ERM including SVRG (Johnson and Zhang, +2013; Xiao and Zhang, 2014), SAGA (Defazio et al., 2014) and Katyusha (Allen-Zhu, 2017), to +name a few. From the perspective of distributed computation, one intrinsic advantage of ERM +over SGD-type methods lies in that it can better explore the statistical correlation among data +samples for designing communication-efficient distributed optimization algorithms (Jaggi et al., +2014; Shamir et al., 2014; Zhang and Lin, 2015; Lee et al., 2017). Unlike stochastic optimization +methods, the generalization performances of the batch or incremental algorithms are by nature +controlled by that of ERM (Bottou and Bousquet, 2007) which has long been studied with a bunch +of insightful results available (Vapnik, 1999; Bartlett et al., 2005; Srebro et al., 2010; Mei et al., +2018). Particularly for strongly convex risk functions, the O( 1 +N ) rate of convergence is possible for +ERM (Bartlett et al., 2005; Koltchinskii, 2006; Zhang et al., 2017), though these fast rates are in +general dimensionality-dependent for parametric learning models. +It has been recognized that SGD-type and ERM-type approaches cannot dominate each other +in terms of generalization, runtime, storage and parallelization efficiency. This motivates a recent +trend of trying to propose the so called stochastic model-based methods that can achieve the best +of two worlds. Among others, a popular paradigm for such a purpose of combination is minibatch +proximal update which in each iteration updates the model via (approximately) solving a local ERM +over a stochastic minibatch (Li et al., 2014; Wang et al., 2017b; Asi et al., 2020; Deng and Gao, +2021). This strategy can be viewed as a minibatch extension to the SPP algorithm and it has +been shown to attain a substantially improved trade-off between computation, communication and +memory efficiency for large-scale distributed machine learning (Li et al., 2014; Wang et al., 2017a). +Alternatively, a number of online extensions of the incremental finite-sum algorithms, such as +streaming SVRG (Frostig et al., 2015) and streaming SAGA (Jothimurugesan et al., 2018), have +been proposed for stochastic optimization with competitive guarantees to ERM but at lower cost +of computation. +Algorithmic stability and generalization. +Since the seminal work of Bousquet and Elisseeff +(2002), algorithmic stability has been extensively studied with remarkable success achieved in estab- +lishing generalization bounds for strongly convex ERM estimators (Zhang, 2003; Mukherjee et al., +2006; Shalev-Shwartz et al., 2010). 
Particularly, the state-of-the-art risk bounds of strongly convex +ERM are offered by approaches based on the notion of uniform stability (Feldman and Vondr´ak, +2018, 2019; Bousquet et al., 2020; Klochkov and Zhivotovskiy, 2021). It was shown by Hardt et al. +(2016) that the solution obtained via (stochastic) gradient descent is stable for smooth con- +vex or non-convex loss functions. +For non-smooth convex losses, the stability induced gener- +alization bounds of SGD have been established in expectation (Lei and Ying, 2020) or devia- +7 + +tion (Bassily et al., 2020). For learning with sparsity, algorithmic stability theory has been em- +ployed to derive the generalization bounds of the popularly used iterative hard thresholding (IHT) +algorithm (Yuan and Li, 2022). Through the lens of uniform algorithmic stability, convergence rates +of M-SPP have been studied for convex (Wang et al., 2017b) and weakly convex (Deng and Gao, +2021) Lipschitz losses. While sharing a similar spirit to Wang et al. (2017b); Deng and Gao (2021), +our analysis customized for smooth convex loss functions is considerably different and the resultant +fast rates are of special interest in low-noise statistical settings (Srebro et al., 2010). +1.4 +Notation and Paper Organization +Notation. +The key quantities and notations frequently used in our analysis are summarized in +Table 1. +Notation +Definition +n +minibatch size +T +round of iteration +N +total number of samples visited, i.e., N = nT +f +hypothesis +ℓ +loss function +r +regularization term +Rℓ +population risk: Rℓ(w) := E(x,y)∼D[ℓ(fw(x), y)] +R +composite population risk: R(w) := Rℓ(w) + r(w) +R∗ +the optimal value of composite risk, i.e., R∗ := minw∈W R(w) +W ∗ +the optimal solution set of composite risk, i.e., W ∗ := arg minw∈W R(w) +St +data minibatch at time instance t +SI +The union of data minibatch over I, i.e., SI := {St}t∈I +Rℓ +S +empirical risk over S, i.e., Rℓ +S(w) := +1 +|S| +� +(x,y)∈S ℓ(fw(x, y) +RS +composite empirical risk over S, i.e., RS(w) := Rℓ +S(w) + r(w) +ǫt +precision of minibatch risk minimization at time instance t +∥w∥1 +ℓ1-norm of a vector w, i.e., ∥w∥1 := � +i |[w]i| +∥w∥ +Euclidean norm of a vector w +D(w, W ∗) +the distance from w to W ∗, i.e., D(w, W ∗) = minw∗∈W ∗ ∥w − w∗∥ +[T] +[T] := {1, ..., T} +1{C} +the indicator function of the condition C +Table 1: Table of notation. +8 + +Organization. The paper proceeds with the material organized as follows: In Section 2, we analyze +the risk bounds of exact M-SPP with convex and smooth loss functions and present a two-phase +variant to further improve convergence performance. In Section 3, we extend our analysis to the +more realistic setting where inexact M-SPP iteration is allowed. In Section 4, we study the high- +probability estimation error bounds of M-SPP. A comprehensive comparison to some closely relevant +results is highlighted in Section 5. +The numerical study for theory verification and algorithm +evaluation is provided in Section 6. The concluding remarks are made in Section 7. All the proofs +of main results and some additional results on the iteration stability of M-SPP are relegated to +appendix. +2 +A Sharper Analysis of M-SPP for Smooth Loss +In this section, we analyze the convergence rate of M-SPP for smooth and convex loss functions +using the tools developed in algorithmic stability theory. 
In what follows, for the sake of notation +simplicity and presentation clarity of core ideas, we assume for the time being that the inner-loop +composite ERM in the M-SPP iteration procedure (2) has been solved exactly with ǫt ≡ 0, i.e., +wt = arg min +w∈W +� +Ft(w) := RSt(w) + γt +2 ∥w − wt−1∥2� +. +(6) +A full convergence analysis for the inexact variant (i.e., ǫt > 0) will be presented in the Section 3 +via a slightly more involved perturbation analysis. +2.1 +Basic Assumptions +We begin by introducing some basic assumptions that will be used in the analysis to follow. We +say a differentiable function g : W �→ R is L-smooth if ∀s, t ∈ R, +��g(w) − g(w′) − ⟨∇g(w), w − w′⟩ +�� ≤ L +2 |w − w′|2. +As formally stated in the following assumption, we suppose that the individual loss functions are +convex and L-smooth which can be satisfied, e.g., by the quadratic loss (for regression) and the +logistic loss (for prediction). +Assumption 1. The loss function ℓ is convex and L-smooth with respect to its first argument. +Also, we assume that the regularization term r is convex over W. +Let us define D(w, W ∗) := minw∗∈W ∗ ∥w − w∗∥ as the distance from w to the set W ∗ of +minimizers. The next assumption requires that the population risk has the characterization of +quadratic growth away from the set of minimizers (Anitescu, 2000; Karimi et al., 2016). +Assumption 2. The population risk function R satisfies R(w) ≥ R∗ + λ +2D2(w, W ∗), ∀w ∈ W for +some λ > 0. +9 + +Clearly, the quadratic growth property can be implied by the traditional strong convexity con- +dition (around the minimizers) which is satisfied by a number of popular learning models including +linear and logistic regression, generalized linear models, smoothed Huber losses, and various other +statistical estimation problems. Particularly, Assumption 2 holds when Rℓ is strongly convex and +r is convex. +Notice that for risk functions with quadratic growth property, the prior analysis +of M-SPP for Lipschitz losses (Wang et al., 2017b) is not generally applicable because Assump- +tion 2 implies that the Lipschitz constant of loss could be arbitrarily large if the infinite distance +minw∗∈W ∗ ∥w − w∗∥ → ∞ is allowed. +2.2 +Main Results +The following theorem is our main result on the in-expectation rate of convergence of the exact +M-SPP with smooth loss and quadratic growth population risk functions. Recall that N = nT is +the total number of data points visited up to the iteration counter T. +Theorem 1. Suppose that Assumptions 1 and 2 hold. Consider ǫt ≡ 0 and the weighted average +output ¯wT = +2 +T(T+1) +�T +t=1 twt in Algorithm 1. Let ρ ∈ (0, 0.5] be an arbitrary scalar. +(a) Suppose that n ≥ 64L +λρ . Set γt = λρt +4 +for t ≥ 1. Then for any T ≥ 1, +E [R( ¯wT ) − R∗] ≤ 4ρ [R(w0) − R∗] +T 2 ++ 29L +λρnT R∗. +(b) Set γt = λρt +4 + 16L +n +for t ≥ 1. Then for any T ≥ 1, +E [R( ¯wT ) − R∗] ≤ +� 4ρ +T 2 + 28L +λnT +� +[R(w0) − R∗] + +� 216L2 +λ2ρ2n2T + 29L +λρnT +� +R∗, +Proof. The proof technique is inspired by the uniform stability arguments developed by Wang et al. +(2017b) for Lipschitz and instance-wise strongly convex loss, with several new elements along de- +veloped for handling smooth loss and quadratic growth of risk function. As a non-trivial ingredient, +we show that it is possible to extend those stability arguments to smooth losses in view of a clas- +sical result from Srebro et al. (2010, Lemma 2.1) that allows the derivative of a smooth loss to be +bounded in terms of its function value. 
See Appendix A.1 for a full proof of this result. +A few remarks on Theorem 1 are in order. +Remark 1. In Part (a), the minibatch size is required to be sufficiently large. In this setting, +the excess risk bound consists of two components: the first bias component associated with initial +gap R(w0) − R∗ has a decaying rate O( 1 +T 2) and the second variance component associated with R∗ +vanishes at a dominate rate of O( +1 +λnT ). The variance term shows that the convergence rate can +be improved in the low-noise settings where the factor of R∗ is relatively small. Extremely in the +separable case with R∗ = 0, the excess risk bound of Theorem 1 would scale as fast as O( 1 +T 2 ). +10 + +Remark 2. One disadvantage of the result in Part (a) lie in that the minibatch size is required to +be sufficiently larger than the condition number of the population risk R. Contrastively, the excess +risk bound in Part (b) holds for arbitrary minibatch sizes. The cost, however, is a relatively slower +bias decaying term O( 1 +T 2 + +1 +λnT ) which is dominated by O( +1 +λnT ) in the case of T ≫ n. +Remark 3. Let N = nT be the total number of data points accessed. When T ≫ n, the O( 1 +N ) +dominant rates in Theorem 1 match those prior ones for SPP-type methods (Wang et al., 2017b; +Davis and Drusvyatskiy, 2019) which are, however, obtained under the assumption that each in- +dividual loss function should be Lipschitz continuous and strongly convex. In comparison to the +O( 1 +N ) rate established for SGD with smooth loss (Lei and Ying, 2020, Theorem 12), our result in +Theorem 1 is stronger and less stringent in the following senses: 1) our bound shows explicitly the +impact of R∗ which usually represents the noise level of model, and 2) we only require the popu- +lation risk to have quadratic growth property while the bound of Lei and Ying (2020, Theorem 12) +not only requires the loss to be Lipschitz but also assumes the empirical risk to be strongly convex. +Let us further look into the choice of the scalar ρ in Theorem 1. We focus the discussion on +the part (a) and similar observations apply to the part (b). We distinguish the discussion in the +following two complementary cases regarding the mutual strength of minibatch-size n and round +of iteration T: +• Case I: Small-n-large-T. Suppose that n = O(1) and T → ∞ is allowed. In this case, simply +setting ρ = 0.5 yields the convergence rate of order O +� 1 +T 2 + +1 +λnT +� +in the part (a). +• Case II: Small-T-large-n. Suppose that T = O(1) and n → ∞ is allowed. In this setup, +given that n ≥ +4T +λ , then with a roughly optimal choice ρ = +� +T +nλ the excess risk bound in +Theorem 1(a) will be of the order O +� +1 +T +√ +λnT +� +, which is substantially slower than the previous +fast rate in Case I. This is intuitive because M-SPP with large minibatches behaves more like +regularized ERM which is known to exhibit slow rate of convergence even for strongly convex +problems (Shalev-Shwartz et al., 2010; Srebro et al., 2010). Nevertheless, such a small-T-large-n +setup is of special interest for off-line incremental learning with large minibatches and distributed +statistical learning (Li et al., 2014; Wang et al., 2017b; You et al., 2020). We will address this +critical case in the next subsection. +11 + +2.3 +A Two-Phase M-SPP Method +Algorithm 2: Two-Phase M-SPP (M-SPP-TP) +Input +: Dataset S = {St}T +t=1 in which St := {zi,t}n +i=1 +i.i.d. +∼ Dn, regularization modulus +{γt > 0}t∈[T]. +Output: ¯wT as a weighted average of {wt}2≤t≤T . 
+Initialization Specify a value of w0. Typically w0 = 0. +/* Phase-I +*/ +Divide sample S1 into disjoint minibatches of equal size m; +Run M-SPP over these minibatches to obtain the output w1; +/* Phase-II +*/ +Initialized with w1, run M-SPP over data minibatches {St}2≤t≤T with {γt}2≤t≤T to obtain +the sequence {wt}2≤t≤T . +To remedy the deficiencies mentioned in the previous discussion, we propose a two-phase variant +of M-SSP, as outlined in Algorithm 2, to boost its performance in the small-T-large-n regimes. +The procedure can be regarded as sort of a restarting argument (Nemirovskii and Nesterov, 1985; +Renegar and Grimmer, 2022; Zhou et al., 2022) for M-SPP. More specifically, the Phase-I serves +as an initialization step that invokes M-SPP to a uniform division of S1 with minibatch size m +to obtain w1. Then starting from w1, the Phase-II just invokes M-SPP to the consequent large +minibatches {St}t≥2 which is suitable for large-scale parallelization if applicable. The following +theorem is a consequence of Theorem 1 to such a two-phase M-SPP procedure. +Theorem 2. Suppose that Assumptions 1 and 2 hold. +Consider ǫt ≡ 0 for implementing M- +SPP in both Phase-I and Phase-II of Algorithm 2. Consider the weighted average output ¯wT = +2 +(T−1)(T+2) +�T +t=2 twt in Phase-II. +(a) Suppose that n ≥ 128L +λ . Set m = 128L +λ +in Phase-I and γt = λt +8 for implementing M-SPP in +both Phase-I and Phase II. Then for any T ≥ 2, ¯wT satisfies +E [R( ¯wT ) − R∗] ≲ L2 [R(w0) − R∗] +λ2n2T 2 ++ +L +λnT R∗. +(b) Set m = O(1) in Phase-I and γt = λt +8 + 16L +n +for implementing M-SPP in both Phase-I and +Phase-II. Then for any T ≥ 2, ¯wT satisfies +E [R( ¯wT ) − R∗] ≲ L2 [R(w0) − R∗] +λ2nT ++ +L3 +λ3nT R∗. +Proof. See Appendix A.2 for a proof of this result. +12 + +Remark 4. The part (a) of Theorem 2 suggests that when the minibatch size is sufficiently large, +the excess risk bound of two-phase M-SPP has a bias decaying term of scale O +� +1 +n2T 2 +� +and a variance +term that decays at the rate of O( 1 +nT ). The rate is valid even when the scales of T relatively small, +and thus is stronger than the O +� +1 +T +√ +nT +� +rate implied by Theorem 1 for the vanilla M-SPP in the +small-T-large-n regime. It is worth to mention that both the bias and variance components in our +bound for M-SPP are faster than those derived for strongly convex ERM (Srebro et al., 2010). +Remark 5. The excess risk bound in Part (b) of Theorem 2 is valid for arbitrary minibatch sizes, +but at the cost of a relatively slower O( 1 +nT ) bias decaying rate. +2.4 +Results for Arbitrary Convex Risks +We further analyze the proposed M-SPP algorithm when the loss function ℓ is convex and smooth, +but without requiring that the composite risk R has quadratic growth property. The following is +our main result in such a generic setting. +Theorem 3. Suppose that Assumption 1 holds. Set γt ≡ γ ≥ 16L +n . Let ¯wT = +1 +T +�T +t=1 wt be the +average output of Algorithm 1. Then +E [R( ¯wT ) − R∗] ≲ γ +T D2(w0, W ∗) + L +γnR∗. +Particularly for γ = +� +T +n + 16L +n , it holds that +E [R( ¯wT ) − R∗] ≲ +� +1 +√ +nT ++ L +nT +� +D2(w0, W ∗) + +L +√ +nT +R∗. +Proof. See Appendix A.3 for a proof of this result. +Remark 6. The first bound of Theorem 3 implies that for any ǫ ∈ (0, 1), by setting γ = O +� L +ǫn +� +, +R( ¯wt) converges to (1+ǫ)R∗ at the rate of O( +1 +nTǫ). This bound matches the results of Lei and Ying +(2020, Theorem 4) for smooth SGD method. 
The second bound of Theorem 3 further shows that by +setting γ = O( +� +T +n + L +n), the excess risk of ¯wT decays at the rate of O( +1 +√ +nT ) for both bias and variance +terms, which matches in order the corresponding bound derived for Lipschitz-loss (Wang et al., +2017b, Theorem 4). To our knowledge, such a bias-variance composite rate of convergence is new +for SPP-type methods with convex and smooth loss functions. +Analogous to the robustness analysis of SPP (Asi and Duchi, 2019a,b), we have also analyzed +the iteration stability of M-SPP for convex losses with respect to the choice of regularization +modulus γt. The corresponding results, which can be found in Appendix A.4, confirm that the +choice of γt is insensitive to the gradient scale of loss functions for generating a non-divergent +sequence of estimation errors. +13 + +3 +Perturbation Analysis for Inexact M-SPP +In the preceding section, we have analyzed the convergence rates of M-SPP under the assumption +that the inner-loop proximal ERM subproblems constructed in its iteration procedure (2) are solved +exactly, i.e., ǫt ≡ 0. To make our analysis more practical, we further provide in this section a +perturbation analysis of M-SPP when the inner-loop proximal ERM subproblems are only required +to be solved approximately up to certain precision ǫt > 0. As a starting point, we need to impose +the following Lipschitz continuity assumption on the regularization term r. +Assumption 3. The regularization term r is Lipschitz continuous over W, i.e., |r(w) − r(w′)| ≤ +G∥w − w′∥, ∀w, w′ ∈ W. +For example, the ℓ1-norm regularizer r(w) = µ∥w∥1 satisfies this assumption with respect to +Euclidean norm as |r(w) − r(w′)| = µ|∥w∥1 − ∥w′∥1| ≤ µ∥w − w′∥1 ≤ µ√p∥w − w′∥. +The following theorem is our main result on the rate of convergence of the inexact M-SPP for +composite stochastic convex optimization with smooth losses. +Theorem 4. Suppose that Assumptions 1, 2 and 3 hold. Let ρ ∈ (0, 1/4] be an arbitrary scalar +and set γt = λρt +4 . Suppose that n ≥ 76L +λρ . Assume that ǫt ≤ +ǫ +nt4 for some ǫ ∈ [0, 1]. Then for any +T ≥ 1, the weighted average output ¯wt = +2 +T(T+1) +�T +t=1 twt of Algorithm 1 satisfies +E [R( ¯wt) − R∗] ≲ ρ +T 2 (R(w0) − R∗) + +L +λρnT R∗ + +√ǫ +T 2 +� L +λρ + G +� +1 +λρ +� +. +Proof. See Appendix B.1 for a proof of this result. We would like to highlight that our perturbation +analysis for smooth loss is considerably different from that of Wang et al. (2017b) developed for +Lipschitz loss. This is mainly because in the smooth loss case, the change of loss could no longer be +upper bounded by the change of prediction, and thus we need to make a more careful treatment to +the perturbation caused by inexact minimization of the regularized minibatch empirical risk. +We provide in order a few remarks on Theorem 4. +Remark 7. Theorem 4 suggests that the excess risk bound of exact M-SPP in the part (a) of The- +orem 1 can be extended to its inexact version, provided that the inner-loop minibatch ERMs (2) +are solved to sufficient accuracy, say, ǫt ≤ O +� 1 +nt4 +� +. Similarly, the result in the part (b) of Theo- +rem 1 for arbitrary minibatch sizes can also be extended to the inexact M-SPP, which is omitted to +avoid redundancy. Since the inner-loop minibatch ERMs are strongly convex and the loss functions +are smooth, in average the desired accuracy can be attained in logarithmic time O +� +log +� +1 +ǫt +�� +via +variance-reduced SGD methods (Xiao and Zhang, 2014). +14 + +Remark 8. 
Analogous to the discussions at the end of Section 2.2, by specifying the choice of +ρ we can derive a direct consequent result of Theorem 4 which more explicitly shows the rate of +convergence with respect to N = nT. Also for the two-phase M-SPP, in view of Theorem 4 we +can show that the bound in Theorem 2 can be extended to the inexact setting if the minibatch +optimization is sufficiently accurate. These extensions are more or less straightforward and thus +are omitted. +In the following theorem, we provide an excess risk bound for the inexact M-SPP when the +composite risk R is convex but not necessarily has quadratic growth property. +Theorem 5. Suppose that Assumptions 1 and 3 hold. +Set γt ≡ γ ≥ +19L +n . +Assume that ǫt ≤ +min +� +ǫ +n2t5 , 2G2 +9n2γ +� +for some ǫ ∈ [0, 1]. Then the average output ¯wT = +1 +T +�T +t=1 wt of Algorithm 1 +satisfies +E [R( ¯wT ) − R∗] ≲ γ +T D2(w0, W ∗) + L +γnR∗ + +� L +γn + +γ +LnT + +G +√γnT +� √ǫ. +Particularly for γ = +� +T +n + 19L +n , it holds that +E [R( ¯wT ) − R∗] ≲ +� +1 +√ +nT ++ L +nT +� +D2(w0, W ∗) + +L +√ +nT +R∗ + +�L + G +√ +nT ++ 1 +nT +� √ǫ. +Proof. See Appendix B.2 for a proof of this result. +Remark 9. Theorem 5 confirms that the excess risk bounds established in Theorem 3 for exact +M-SPP are tolerant to sufficiently small sub-optimality ǫt ≤ O( +1 +n2t5 ) of minibatch proximal ERM +subproblems. +4 +Performance Guarantees with High Probability +In the previous two sections, we have analyzed the excess risk bounds of M-SPP in expectation. +In this section, we move on to study high-probability guarantees of M-SPP with respect to the +randomness of training data, still under the notion of algorithmic stability. To this end, we first +introduce a variant of M-SPP which carries out the proximal point update via sampling without +replacement over the given data minibatches. +We then show that the output of the proposed +algorithm is uniformly stable in expectation over the randomness of sampling. As a main result +of this section, for strongly convex population risk, we establish a near-optimal high probability +(with respect to data) bound on the estimation error ∥ ¯wt − w∗∥ that holds in expectation over +the randomness of inner-data sampling. Additionally, we provide a high-probability generalization +bound for arbitrary convex loss. +15 + +4.1 +Sampling Without Replacement M-SPP +Algorithm 3: Sampling Without Replacement M-SPP (M-SPP-SWoR) +Input +: Dataset S = {St}T +t=1 in which St := {zi,t}n +i=1 +i.i.d. +∼ Dn, regularization modulus +{γt > 0}t∈[T]. +Output: ¯wT as a weighted average of {wt}1≤t≤T .. +Initialization Specify a value of w0. Typically w0 = 0. +for t = 1, 2, ..., T do +Uniformly randomly sample an index ξt ∈ [T] without replacement. +Estimate wt satisfying +Ft(wt) ≤ min +w∈W +� +Ft(w) := RSξt(w) + γt +2 ∥w − wt−1∥2� ++ ǫt, +(7) +where ǫt ≥ 0 measures the sub-optimality. +end +Let us consider the M-SPP-SWoR (M-SPP via Sampling Without Replacement) procedure as +outlined in Algorithm 3. Given a set S of T data minibatches, at each iteration, the algorithm +uniformly randomly samples one minibatch from S without replacement for proximal update. After +T rounds of iteration, all the minibatches are used to update the model. Since this procedure is +merely a random shuffling variant of M-SPP as presented in Algorithm 1, we can see that all the +in-expectation bounds established in the previous sections for M-SPP directly transfer to M-SPP- +SWoR under any implementation of shuffling. 
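A minimal sketch of this sampling-without-replacement scheme is given below: the stored minibatches are visited in a uniformly random order and the proximal update itself is unchanged from Algorithm 1. The inner solver is a placeholder, and the output weights 2t/(T(T+1)) together with the gamma_t list passed in are illustrative choices matching the quadratic-growth setting discussed earlier, not a prescription from the source.

    import numpy as np

    def m_spp_swor(minibatches, solve_prox_subproblem, gammas, w0, seed=0):
        """M-SPP-SWoR (Algorithm 3): one pass over the T stored minibatches
        in a uniformly random order (sampling without replacement).

        solve_prox_subproblem(batch, gamma, w_prev) should return an
        (approximate) minimizer of R_batch(w) + gamma/2*||w - w_prev||^2.
        """
        rng = np.random.default_rng(seed)
        T = len(minibatches)
        order = rng.permutation(T)           # xi_1, ..., xi_T without replacement
        w = w0.copy()
        acc = np.zeros_like(w0)
        for t, xi_t in enumerate(order, start=1):
            w = solve_prox_subproblem(minibatches[xi_t], gammas[t - 1], w)
            acc += t * w
        return 2.0 * acc / (T * (T + 1))     # weighted average output w_bar_T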
As we will show shortly in the next subsection that +such a random shuffling scheme is beneficial for boosting the on-average algorithmic stability of +M-SPP which then leads to strong high-probability guarantees for M-SPP-SWoR. +4.2 +A Uniform Stability Analysis +Let S = {St}t∈[T] and S′ = {S′ +t}t∈[T] be two sets of data minibatches. We denote by St .= S′ +t +if St and S′ +t differ in a single data point, and by S +.= S′ if S and S′ differ in a single mini- +batch and a single data point in that minibatch. We introduce the following concept of uniform +stability of M-SPP which substantializes the concept of uniform algorithmic stability that serves +as a powerful tool for analyzing generalization bounds of statistical estimators and their learning +algorithms (Bousquet and Elisseeff, 2002; Hardt et al., 2016; Feldman and Vondr´ak, 2019). +Definition 1 (Uniform Stability of M-SPP). The M-SPP algorithm is said to be ̺-uniformly stable +with respect to a mapping h : W �→ Rq if ∥h( ¯wT ) − h( ¯w′ +T )∥ ≤ ̺ for any pair of data sets S .= S′. +The following result gives a uniform stability (with respect to identical mapping) bound of the +vanilla M-SPP (Algorithm 1) that holds deterministically, and a corresponding bound for M-SPP- +SWoR (Algorithm 3) that holds in expectation over the randomness of minibatch sampling. +16 + +Proposition 1. Suppose that Assumption 1 holds and the loss function is bounded such that 0 ≤ +ℓ(y′, y) ≤ M for all y, y′. Let S = {St}t∈[T] and S′ = {S′ +t}t∈[T] be two sets of data minibatches +satisfying S .= S′. Then +(a) The weighted average output ¯wT and ¯w′ +T respectively generated by M-SPP (Algorithm 1) over +S and S′ satisfy +sup +S,S′ ∥ ¯wT − ¯w′ +T ∥ ≤ +4 +√ +2LM +n mint∈[T] γt ++ +T +� +t=1 +2 +�2ǫt +γt +. +(b) The weighted average output ¯wT and ¯w′ +T respectively generated by M-SPP-SWoR (Algo- +rithm 3) over S and S′ satisfy +sup +S,S′ Eξ[T ] +� +∥ ¯wT − ¯w′ +T ∥ +� +≤ +T +� +t=1 +� +4 +√ +2LM +nTγt ++ 2 +� +2ǫt +γt +� +. +Proof. See Appendix C.1 for a proof. +Remark 10. Suppose that the sub-optimality {ǫt}t∈[T] are sufficiently small. If setting γt = O(t) +as used for population risks with quadratic growth property, then Proposition 1 shows that M-SPP is +O +� 1 +n +� +-uniformly stable, while in expectation over the randomness of without-replacement sampling, +M-SPP-SWoR has an much improved uniform stability parameter scaling as O +�log(T) +nT +� +. If setting +γt ≡ +� +T +n as used for generic convex loss, then M-SPP will be O +� +1 +√ +nT +� +-uniformly stable while +M-SPP-SWoR has an identical uniform stability parameter in expectation over sampling. +In the following theorem, based on the uniform stability bounds in Proposition 1, we derive an +upper bound on the estimation error D( ¯wT , W ∗) of M-SPP-SWoR that holds with high probability +over data distribution while in expectation over randomly sampling the minibatches for update. +Theorem 6. Suppose that Assumptions 1, 2, 3 hold and the loss function ℓ is bounded in the +interval (0, M]. Let ρ ∈ (0, 1/4] be an arbitrary scalar and set γt = λρt +4 . Suppose that n ≥ 76L +λρ . +Assume that ǫt ≤ min +� +ǫ +nt4 , +LM +λρn2T 2t +� +for some ǫ ∈ [0, 1]. Then with probability at least 1 − δ over +S, the weighted average output ¯wT of M-SPP-SWoR (Algorithm 3) satisfies +Eξ[T ] [D( ¯wT , W ∗)] +≲ +� +LM log(1/δ) log(T) +λρ +√ +nT ++ +� +ρ [R(w0) − R∗] +λT 2 ++ +L +λ2ρnT R∗ + +√ǫ +λT 2 +� L +λρ + G +� 1 +λρ +� +. +Proof. See Appendix C.2 for a proof of this result. +17 + +Remark 11. 
We comment on the optimality of the bound in Theorem 6. Consider ρ = O(1). The +first term of scale O +�√ +log(1/δ) log(T) +√ +nT +� +represents the overhead of getting generalization with high prob- +ability over data. The second term matches the corresponding in-expectation estimation error bound +in Theorem 4, which matches the known optimal rates for strongly convex SGD (Rakhlin et al., +2012; Dieuleveut et al., 2017). In view of the minimax lower bounds for statistical estimation (Tsybakov, +2008), the estimation error bound established in Theorem 6 is near-optimal for strongly convex risk +minimization. +Finally, we provide a high-probability generalization bound of M-SPP for arbitrary convex +population risk functions. +Theorem 7. Suppose that Assumptions 1 and 3 hold and the loss function ℓ is bounded in the +interval [0, M]. Set γt ≡ +� +T +n . Assume that ǫt ≤ +LM +4nT 2√ +nT . Then with probability at least 1 − δ over +S, the average output ¯wT = 1 +T +�T +t=1 wt of M-SPP (Algorithm 1) satisfies +|R( ¯wT ) − RS( ¯wT )| ≲ (LM + G +√ +LM) log(N) log(1/δ) +√ +nT ++ M +� +log (1/δ) +nT +. +Proof. See Appendix C.3 for a proof of this result. +We remark in passing that using similar uniform stability argument, the high-probability gen- +eralization bound in Theorem 7 can be shown to hold for convex and non-smooth loss functions as +well. We omit the detailed analysis as it is out of the scope of this paper focusing on smooth losses. +5 +Comparison with Prior Methods +Comparison with M-SPP and SPP methods. The M-SPP algorithm considered in this article is a +minibatch extension of the SPP methods. The convergence analysis of SPP has received recent +wide attention in stochastic optimization community. Specially for finite-sum optimization over +N data points, an incremental SPP method was proposed and analyzed in (Bertsekas, 2011). For +learning with linear prediction models and strongly convex Lipschitz-loss, (Toulis et al., 2016) es- +tablished a set of O( 1 +Nγ ) rates of convergence for SPP with suitable γ ∈ (0.5, 1], where N is the +iteration counter. For arbitrary convex loss functions, the non-asymptotic convergence performance +of SPP was studied with O( 1 +√ +N ) rate obtained for Lipschitz losses (Patrascu and Necoara, 2017; +Davis and Drusvyatskiy, 2019), O( 1 +N ) for strongly convex and Lipschitz (Davis and Drusvyatskiy, +2019) or smooth (Patrascu and Necoara, 2017) losses, or O +� +log(N) +N +� +rate for strongly convex non- +smooth losses (Asi and Duchi, 2019b). Recently, it has been shown that the O +� +log(N) +N +� +rate also ex- +tends to M-SPP with strongly convex losses (Asi et al., 2020). The asymptotic and non-asymptotic +behaviors of SPP for weakly convex losses (e.g., composite of convex loss with smooth map) have +been studied for stochastic optimization with (Duchi and Ruan, 2018) or without (Davis and Drusvyatskiy, +18 + +2019) composite structures. Among others, our work is most closely related to the minibatch prox- +imal update method developed for communication-efficient distributed optimization (Wang et al., +2017b). Similarly from the viewpoint of algorithmic stability, the O( 1 +Nγ ) rates were established +for that method for Lipschitz-loss with arbitrary convexity (γ = 0.5) or strong convexity (γ = 1). 
+In comparison to these prior results, our convergence results for M-SPP are new in the following +aspects: +• The convergence rates are derived for smooth losses and they explicitly show the impact of +noise level of a statistical model, as encoded in R∗, to convergence performance which has +not been previously known for SPP-type methods. +• The O(N −1) fast rate attained in this article is valid for population risks with quadratic +growth property, without requiring each instantaneous loss to be strongly convex. +• We provide a near-optimal model estimation error bound of a sampling-without-replacement +variant of M-SPP that holds with high probability over the randomness of data while in +expectation over the randomness of sampling. +Comparison with SGD and ERM. Similar to those in Theorem 1 and Theorem 3, the bias- +variance composite rates have been known for accelerated SGD for least squares regression (Dieuleveut et al., +2017), or minibatch SGD (M-SGD) for generic convex and smooth learning problems (Woodworth and Srebro, +2021). While the results are of similar flavor, we came to the path in a distinct algorithmic frame- +work using quite different proof techniques. Particularly, in contrast to Woodworth and Srebro +(2021), our analysis neither uses the knowledge of model scale which is typically inaccessible in +real problems, nor relies on the restarting arguments for strongly convex problems. Also for SGD +with smooth loss functions, a fast rate of O( 1 +N ) has recently been established via stability theory +in the ideally clean case where the optimal population risk is zero (Lei and Ying, 2020, Theorem +4). +With γ = O( 1 +n), the first bound of our Theorem 3 matches that bound in the context of +M-SPP. For strongly convex problems, our results in Theorem 1 are stronger than (Lei and Ying, +2020, Theorem 12) in the sense that the formers (ours) only require the population risk to have +quadratic growth property while the latter requires the loss to be Lipschitz and the empirical risk +to be strongly convex. Finally, for convex ERM, similar composite risk bounds have been estab- +lished by Srebro et al. (2010); Zhang et al. (2017) under somewhat more stringent conditions such +as bounded domain of interest and huge sample with N ≫ p. +19 + +Table 2 summaries a comparison of the risk bounds obtained in this work to several prior ones +for (M-)SPP, (M-)SGD and ERM. +Method +Literature +Risk Bound +Conditions +Loss +R +RS +M-SPP +Asi et al. (2020) +O +� +log(N) +N +� +s.cvx +— +— +Wang et al. (2017b) +O +� 1 +N +� +Lip & s.cvx +— +— +Theorem 1 (our work) +O +� +1 +T 2 + R∗ +N +� +or +O +� +1 +T 2 + 1+R∗ +N +� +sm & cvx +qg +— +Theorem 3 (our work) +O +� 1 +N + R∗� +or +O +� +1+R∗ +√ +N +� +sm & cvx +— +— +SPP +Asi and Duchi (2019b) +O +� +log(N) +N +� +s.cvx +— +— +Patrascu and Necoara (2017) +O +� 1 +N +� +sm & s.cvx +— +Davis and Drusvyatskiy (2019) +O +� 1 +N 2 + 1 +N +� +Lip & s.cvx +— +M-SGD +Woodworth and Srebro (2021) +O +� +1 +T 2 + 1 +N + +� +R∗ +N +� +sm & cvx +— +— +O +� +e−T + R∗ +N +� +sm & cvx +qg +— +Dieuleveut et al. (2017) +O +� +1 +N 2 + R∗ +N +� +quadratic +s.cvx +— +SGD +Lei and Ying (2020) +O +� 1 +N + R∗� +or +O +� +1+R∗ +√ +N +� +sm & cvx +— +s.cvx +Rakhlin et al. (2012) +O +� 1 +N +� +Lip & +sm & cvx +s.cvx +— +ERM +Zhang et al. (2017) +O +� +p +N + R∗ +N +� +or +O +� +1 +N 2 + R∗ +N +� +for N ≳ p +sm & cvx +Lip +& s.cvx +— +Srebro et al. 
(2010) +O +� +1 +N + +� +R∗ +N +� +sm & cvx +— +— +Table 2: Comparison of our risk bounds to some prior results for M-SPP and SPP as well as for +SGD and ERM. Recall that T is the iteration count and N is the total number of samples accessed. +All the listed bounds hold in expectation. Here we have used the following abbreviations: cvx +(convex), s.cvx (strongly convex), Lip (Lipschitz continuous), sm (smooth), qg (quadratic growth). +20 + +6 +Experiments +We carry out a set of numerical study to demonstrate the convergence performance of minibatch +stochastic proximal point methods in (composite) statistical learning problems, to answer the fol- +lowing 3 questions associated with the key theory and algorithms established in this article: +• Question 1: How the size of minibatch and noise level of a statistical learning model affect +the convergence speed of M-SPP for smooth loss function? +This question is mainly about +verifying Theorem 1 and Theorem 5, and it is answered through a simulation study on Lasso +estimation in Section 6.1. +• Question 2: Can the two-phase variant of M-SPP improve over M-SPP in the small-T-large-n +setting? The simulation results presented in Section 6.1 also answer this question related to +the verification of Theorem 2. +• Question 3: How M-SPP(-TP) methods compare with M-SGD in convergence performance? +The real-data experimental results on logistic regression tasks in Section 6.2 answer this +question about algorithm comparison. +6.1 +Simulation Study +We first provide a simulation study to verify our theoretical results for smooth losses when substan- +tialize to the widely used Lasso regression model (Wainwright, 2009) with quadratic loss function +ℓ(fw(x), y) = 1 +2(y−w⊤x)2 and r(fw) = µ∥w∥1 where µ is the ℓ1-penalty modulus. Given a model pa- +rameter ¯w ∈ Rp and a feature point x ∈ Rp drawn from standard Gaussian distribution N(0, Ip×p), +the responses y is generated according to a linear model y = ¯w⊤x + ε with a random Gaussian +noise ε ∼ N(0, σ2). In this case, the population risk function can be expressed in a close form as +R(w) = 1 +2∥w − ¯w∥2 + σ2 +2 + µ∥w∥1. +Given a set of T random n-minibatches +� +St = {xi,t, yi,t}i∈[n] +� +t∈[T] drawn from the above data +distribution, we aim at evaluating the convergence performance of M-SPP towards the minimizer +of R which can be expressed as +w∗ = ( ¯w − µ)+ − (− ¯w − µ)+, +where (·)+ is an element-wise function that preserves the positive parts of a vector. +We test with p = 5000 and N = nT = 100p, and consider a well-specified sparse regression +model where the true parameter vector ¯w is ¯k-sparse with ¯k = 0.2p and its non-zero entries are +sampled from a zero-mean Gaussian distribution. We set µ = 10−3 and initialize w(0) = 0. The +inner-loop minibatch proximal Lasso subproblems are optimized via a standard proximal gradient +descent method, using either of the following two termination criteria: 1) the difference between +consecutive objective values is below 10−3 and 2) the iteration step reaches 1000. +21 + +0 +1 +2 +3 +4 +5 +105 +-5 +0 +5 +10 +(a) Results under varying T . +0 +1 +2 +3 +4 +5 +105 +-5 +0 +5 +10 +(b) Results under varying σ. +0 +1 +2 +3 +4 +5 +105 +-10 +-5 +0 +5 +10 +15 +(c) M-SPP versus M-SPP-TP +Figure 1: Simulation study on Lasso regression: Convergence performances of M-SPP and M-SPP- +TP. The y-axis represents the logarithmic scale of excess risk. 
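The simulation just described is fully specified by closed forms: w* is the soft-thresholding of w_bar at level mu, the population excess risk R(w) - R* can be evaluated exactly (the quantity plotted on a log scale in Figure 1), and each inner minibatch proximal Lasso subproblem is solved by proximal gradient descent with the two termination criteria quoted above. The sketch below puts these pieces together at a reduced dimension (the paper uses p = 5000); the fixed step size 1/L and all function names are our own illustrative assumptions, not the paper's code.

    import numpy as np

    def soft_threshold(v, tau):
        # componentwise (v - tau)_+ - (-v - tau)_+
        return np.sign(v) * np.maximum(np.abs(v) - tau, 0.0)

    def population_risk(w, w_bar, sigma, mu):
        # closed form under standard Gaussian design:
        #   R(w) = 0.5*||w - w_bar||^2 + 0.5*sigma^2 + mu*||w||_1
        return 0.5 * np.sum((w - w_bar) ** 2) + 0.5 * sigma ** 2 \
               + mu * np.sum(np.abs(w))

    def prox_lasso_subproblem(X, Y, gamma, w_prev, mu, tol=1e-3, max_iter=1000):
        """Proximal gradient (ISTA) for the inner subproblem
            min_w (1/2n)*||Y - X w||^2 + mu*||w||_1 + (gamma/2)*||w - w_prev||^2,
        stopped when the objective changes by less than tol or after max_iter
        iterations (the two criteria quoted above)."""
        n, _ = X.shape
        L = np.linalg.norm(X, 2) ** 2 / n + gamma   # Lipschitz const. of smooth part

        def objective(w):
            return (0.5 / n) * np.sum((Y - X @ w) ** 2) + mu * np.sum(np.abs(w)) \
                   + 0.5 * gamma * np.sum((w - w_prev) ** 2)

        w, prev_obj = w_prev.copy(), objective(w_prev)
        for _ in range(max_iter):
            grad = X.T @ (X @ w - Y) / n + gamma * (w - w_prev)   # smooth part
            w = soft_threshold(w - grad / L, mu / L)              # prox of mu*||.||_1
            obj = objective(w)
            if abs(prev_obj - obj) < tol:
                break
            prev_obj = obj
        return w

    rng = np.random.default_rng(3)
    p, sigma, mu = 500, 0.1, 1e-3                 # illustrative (paper: p = 5000)
    k = int(0.2 * p)                              # sparsity level 0.2*p
    w_bar = np.zeros(p)
    w_bar[rng.choice(p, size=k, replace=False)] = rng.normal(size=k)

    w_star = soft_threshold(w_bar, mu)            # minimizer of the population risk
    R_star = population_risk(w_star, w_bar, sigma, mu)

    def excess_risk(w):
        return population_risk(w, w_bar, sigma, mu) - R_star

    # one illustrative M-SPP step from w = 0 on a single random minibatch
    n_mb, gamma_1 = 200, 0.25
    X = rng.normal(size=(n_mb, p))
    Y = X @ w_bar + sigma * rng.normal(size=n_mb)
    w1 = prox_lasso_subproblem(X, Y, gamma_1, np.zeros(p), mu)
    print("excess risk at w = 0 :", excess_risk(np.zeros(p)))
    print("excess risk at w_1  :", excess_risk(w1))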
+The following two experimental setups are considered for theory verification: +• We fix the noise level σ = 0.1 and study the impact of varying T ∈ {10, 20, 100, 500} on the +convergence of M-SPP. Figure 1(a) shows the evolving curves of excess risk as functions of +sample size, in a semi-log layout with y-axis representing the logarithmic scale of excess risk. +From this set of curves we can observe a clear trend that in the early stage, M-SPP converges +faster when the total number of minibatches is relatively large (say, T ∈ {20, 100}). This is +consistent with the prediction of Theorem 1 about the impact of T and n on convergence rates. +While in the final stage, relatively slower convergence behavior is exhibited under relatively +larger T (say, T ∈ {100, 500}). This observation can be explained by the inexact analysis +in Theorem 4 which shows that to guarantee the desired convergence rate, the inner-loop +proximal ERM update needs to be extremely accurate when T is relatively large. Therefore, +the question raised in Question 1 on the impact of minibatch size on convergence rate is +answered by this group of results. +Also in this setup, we have compared M-SPP and its two-phase variant M-SPP-TP for +T ∈ {5, 10}. The related results are shown in Figure 1(c), which indicate that M-SPP-TP sig- +nificantly improves the convergence of M-SPP in the small-T-large-n cases. This observation +supports the result of Theorem 2 and answers Question 2 affirmatively. +• We fix T = 50 and study the impact of varying noise level σ ∈ {0.1, 1, 5} on the convergence +performance. The results are shown in Figure 1(b). From this group of results we can see that +faster convergence speed is attained at relatively smaller noise level σ, while the speed becomes +insensitive to noise level when σ is sufficiently small (say, σ ≤ 1). This is consistent with the +predication by Theorem 1, keeping in mind the fact that R∗ = 1 +2∥w∗ − ¯w∥2 + σ2 +2 + µ∥w∗∥1 ≤ +∥ ¯w∥2 + 1 +2σ2. The question raised in Question 1 on the impact of noise level on convergence +performance is answered by this group of results. +22 + +(a) n = N/5 +(b) n = N/20 +(c) n = N/100 +Figure 2: Real-data results on logistic regression: Test error convergence comparison on gisette +under varying minibatch size. +6.2 +Experiment on Real Data +We further compare our methods with M-SGD for binary prediction problems using the logistic loss +ℓ(w⊤x, y) = log(1+exp(−yw⊤x)). Here the M-SGD method is implemented by an SGD solver from +SGDLibrary (Kasai, 2017). For M-SPP and M-SPP-TP, the The inner-loop minibatch proximal +ERMs are solved by the same SGD solver applied with a fixed SGD-batch-size 10 and a single +epoch of data processing. We initialize w(0) = 0 for all the considered methods. +We use two public data sets for evaluation: the gisette data (Guyon et al., 2004) with p = +5000, N = 6000 and the covtype.binary data (Collobert et al., 2001) with p = 54, N = 581, 012 1. +For each data set, we use half of the samples as training set and the rest as test set. +We are +interested in the impact of minibatch-size n on the prediction performance of model measured by +test error. All the considered stochastic algorithms are executed with 10 epochs of data processing, +and thus the overall number of minibatches is T = N/n × 10. We replicate each experiment 10 +times over random split of data and report the results in mean-value along with error bar. 
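For the real-data runs just described, each inner minibatch proximal ERM is approximately solved by a single SGD epoch with SGD-batch-size 10. The NumPy sketch below is only an analogue of that inner step (the experiments use a solver from SGDLibrary, which is not reproduced here); the constant step size eta and the function names are assumptions made for illustration.

    import numpy as np

    def logistic_minibatch_grad(w, X, Y):
        # gradient of (1/b) * sum_i log(1 + exp(-y_i * x_i'w)) over an SGD batch
        margins = Y * (X @ w)
        coeff = -Y / (1.0 + np.exp(margins))
        return X.T @ coeff / len(Y)

    def sgd_prox_logistic(X, Y, gamma, w_prev, eta=0.1, sgd_batch=10, seed=0):
        """One inexact inner M-SPP solve: a single SGD epoch on
           (1/n)*sum_i log(1 + exp(-y_i x_i'w)) + (gamma/2)*||w - w_prev||^2."""
        rng = np.random.default_rng(seed)
        n = len(Y)
        order = rng.permutation(n)
        w = w_prev.copy()
        for start in range(0, n, sgd_batch):
            idx = order[start:start + sgd_batch]
            g = logistic_minibatch_grad(w, X[idx], Y[idx]) + gamma * (w - w_prev)
            w -= eta * g
        return w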
+In Figure 2, we show the evolving curves (error bar shaded in color) of test error with respect +to the number of minibatches accessed on gisette, under varying minibatch size n ∈ {N +5 , N +20, N +100}. +From this set of curves we can observe that: +• Under the same minibatch size, M-SPP and M-SPP-TP converge faster and stabler than +M-SGD, especially when the minibatch size is relatively large (see Figure 2(a)). This is as +expected because when minibatch size becomes large, M-SGD approaches to gradient descent +method while M-SPP approaches ERMs. This answers Question 3 raised at the beginning of +the experiment section. +• M-SPP-TP exhibits sharper convergence behavior than M-SPP at the early stage of iteration, +especially when the minibatch-size is relatively large. This is consistent with our theoretical +results in Theorem 1 and Theorem 2. +1Both data sets are available at https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/. +23 + +0.5 +M-SGD +0.4 +-M-SPP +Test Error +M-SPP-TP +0.3 +0.2 +0.1 +0 +0 +200 +400 +600 +800 +1000 +#Minibatches0.5 +M-SGD +0.4 +-M-SPP +Test Error +M-SPP-TP +0.3 +0.2 +0.1 +0 +0 +50 +100 +150 +200 +#Minibatches0.5 +M-SGD +0.4 +-M-SPP +Test Error +M-SPP-TP +0.3 +0.2 +0.1 +0 +0 +10 +20 +30 +40 +50 +#Minibatches(a) n = N/20 +(b) n = N/100 +(c) n = N/1000 +Figure 3: +Real-data results on logistic regression: +Test error convergence comparison on +covtype.binary under varying minibatch size. +Figure 3 shows the corresponding results on covtype under n ∈ +� N +20, N +100, +N +1000 +� +. From this set of +results we once again see that M-SPP and M-SPP-TP consistently outperform M-SGD under the +same minibatch size, and M-SPP-TP converges faster than M-SPP under relatively larger minibatch +size (say, n = N +20). +7 +Conclusions and Future Prospects +In this article, we presented an improved convergence analysis for the minibatch stochastic proximal +point methods with smooth and convex losses. Under the quadratic growth condition on population +risk, we showed that M-SPP with minibatch-size n and iteration count T converges at a composite +rate consisting of an O( 1 +T 2) bias decaying component and an O( 1 +N ) variance decaying component. +In the small-n-large-T case, this result substantially improves the prior relevant results of SPP- +type approaches which typically require each instantaneous loss to be Lipschitz and strongly convex. +Complementally in the small-T-large-n setting, we provide a two-phase acceleration of M-SPP which +improves the O( 1 +T 2) bias decaying rate to O +� +log(N) +N2 +� +. Perhaps the most interesting theoretical +finding is that the (dominant) variance decaying term has a factor dependence on the minimal value +of population risk, justifying the sharper convergence behavior of M-SPP in low-noise statistical +setting as backed up by our numerical evidence. In addition to the in-expectation risk bounds, we +have also derived a near-optimal parameter estimation error bound for a random shuffling variant of +M-SPP that holds with high probability over data distribution and in expectation over the random +shuffling. +To conclude, our theory lays a novel and stronger foundation for understanding the +convex M-SPP style algorithms that have gained recent significant attention, both in theory and +practice, for large-scale machine learning (Li et al., 2014; Wang et al., 2017a; Asi et al., 2020). 
[Figure 3 panels: test error versus number of minibatches on covtype.binary for M-SGD, M-SPP and M-SPP-TP; see the caption above.]

There are several key prospects for future investigation of our theory:

• It is still open to derive near-optimal exponential excess risk bounds for M-SPP that apply to the (suffix) average or the last iterate over the training data.

• Inspired by the recent progress made towards understanding M-SPP with momentum acceleration (Deng and Gao, 2021; Chadha et al., 2022), it is interesting to provide momentum and weakly-convex extensions of our theory for smooth loss functions.

• Last but not least, we expect that the theory developed in this article can be extended to the setup of non-parametric learning with minibatch stochastic proximal point methods.

Acknowledgements

The authors sincerely thank the anonymous referees for their constructive comments. The work of Xiao-Tong Yuan is also funded in part by the National Key Research and Development Program of China under Grant No. 2018AAA0100400 and in part by the Natural Science Foundation of China (NSFC) under Grants No. U21B2049, No. 61876090 and No. 61936005.

A  Proofs for the Results in Section 2

In this section, we present the technical proofs for the main results stated in Section 2.

A.1  Proof of Theorem 1

Here we prove Theorem 1, restated below for convenience.

Theorem 1. Suppose that Assumptions 1 and 2 hold. Consider ε_t ≡ 0 and the weighted average output w̄_T = (2/(T(T+1))) Σ_{t=1}^T t·w_t in Algorithm 1. Let ρ ∈ (0, 0.5] be an arbitrary scalar.

(a) Suppose that n ≥ 64L/(λρ). Set γ_t = λρt/4 for t ≥ 1. Then for any T ≥ 1,

    E[R(w̄_T) − R*] ≤ 4ρ[R(w_0) − R*]/T² + (2⁹L/(λρnT)) R*.

(b) Set γ_t = λρt/4 + 16L/n for t ≥ 1. Then for any T ≥ 1,

    E[R(w̄_T) − R*] ≤ (4ρ/T² + 2⁸L/(λnT)) [R(w_0) − R*] + (2¹⁶L²/(λ²ρ²n²T) + 2⁹L/(λρnT)) R*.

We first present the following lemma which will be used in the proof. It can be viewed as a straightforward extension of the prior result (Wang et al., 2017b, Lemma 1) to the setup of composite minimization. A proof is included here for the sake of completeness.

Lemma 1. Assume that the loss function ℓ is convex with respect to its first argument and the regularization function r is convex. Then for any w ∈ W, we have

    R_{S_t}(w_t) − R_{S_t}(w) ≤ (γ_t/2) (∥w − w_{t−1}∥² − ∥w − w_t∥² − ∥w_t − w_{t−1}∥²).

Proof. Since ℓ and r are both convex, R_{S_t} is convex over W. The optimality of w_t implies that for any w ∈ W and η ∈ (0, 1),

    R_{S_t}(w_t) + (γ_t/2)∥w_t − w_{t−1}∥²
      ≤ R_{S_t}((1 − η)w_t + ηw) + (γ_t/2)∥(1 − η)w_t + ηw − w_{t−1}∥²
      ≤ (1 − η)R_{S_t}(w_t) + ηR_{S_t}(w) + (γ_t/2) [(1 − η)∥w_t − w_{t−1}∥² + η∥w − w_{t−1}∥² − η(1 − η)∥w − w_t∥²],

where in the last inequality we have used the definition of the norm ∥·∥. Rearranging both sides of the above inequality yields

    η(R_{S_t}(w_t) − R_{S_t}(w)) ≤ (ηγ_t/2) [∥w − w_{t−1}∥² − (1 − η)∥w − w_t∥² − ∥w_t − w_{t−1}∥²],

which then implies (keeping in mind that η > 0)

    R_{S_t}(w_t) − R_{S_t}(w) ≤ (γ_t/2) [∥w − w_{t−1}∥² − (1 − η)∥w − w_t∥² − ∥w_t − w_{t−1}∥²].

Letting η → 0⁺ in the above inequality yields the desired bound.

The following boundedness result for smooth functions is due to Srebro et al. (2010, Lemma 3.1).

Lemma 2. If g is non-negative and L-smooth, then ∥∇g(w)∥ ≤ √(2L g(w)).

Let {F_t}_{t≥1} be the filtration generated by the iterates {w_t}_{t≥1}, i.e., F_t = σ(w_1, w_2, ..., w_t).
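Before proceeding, Lemma 2 can be sanity-checked numerically. The following standalone snippet (purely illustrative and not part of the analysis) verifies the self-bounding inequality ∥∇g(w)∥ ≤ √(2L g(w)) for the non-negative logistic loss on randomly drawn examples, using the standard upper bound L ≤ ∥x∥²/4 on its smoothness constant; the sample sizes and parameter scales are arbitrary choices.

    import numpy as np

    rng = np.random.default_rng(0)
    for _ in range(1000):
        x = rng.normal(size=5)
        y = rng.choice([-1.0, 1.0])
        w = rng.normal(size=5) * rng.uniform(0.1, 10.0)      # a wide range of parameter scales
        L = x @ x / 4.0                                      # smoothness constant of the logistic loss
        z = y * (x @ w)
        g = np.log1p(np.exp(-z))                             # non-negative loss value g(w)
        grad = -y * x / (1.0 + np.exp(z))                    # gradient of g at w
        assert np.linalg.norm(grad) <= np.sqrt(2.0 * L * g) + 1e-12
    print("self-bounding inequality of Lemma 2 held on all 1000 random instances")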
With +Lemma 1 and Lemma 2 in place, we can further establish the following key lemma that plays a +fundamental role in proving Theorem 1. +Lemma 3. Suppose that the Assumptions 1 holds. Set γt ≥ 16L +n . Then we have +E [R(wt) − R∗ | Ft−1] ≤ γt +� +D2(wt−1, W ∗) − E +� +D2(wt, W ∗) | Ft−1 +�� ++ 16L +γtn R∗. +Proof. Let us consider a sample set S(i) +t +which is identical to St except that one of the zi,t is replaced +by another random sample z′ +i,t. Denote +w(i) +t += arg min +w∈W +� +F (i) +t (w) := RS(i) +t (w) + γt +2 ∥w − wt−1∥2� +, +where RS(i) +t (w) := 1 +n +�� +j̸=i ℓ(w; zj,t) + ℓ(w; z′ +i,t) +� ++ r(w). Then we can show that +Ft(w(i) +t ) − Ft(wt) += 1 +n +� +j̸=i +� +ℓ(w(i) +t ; zj,t) − ℓ(wt; zj,t) +� ++ 1 +n +� +ℓ(w(i) +t ; zi,t) − ℓ(wt; zi,t) +� ++ r(w(i) +t ) − r(wt) + γt +2 ∥w(i) +t +− wt−1∥2 − γt +2 ∥wt − wt−1∥2 +=F (i) +t (w(i) +t ) − F (i) +t (wt) + 1 +n +� +ℓ(w(i) +t ; zi,t) − ℓ(wt; zi,t) +� +− 1 +n +� +ℓ(w(i) +t ; z′ +i,t) − ℓ(wt; z′ +i,t) +� +≤ 1 +n +���ℓ(w(i) +t ; zi,t) − ℓ(wt; zi,t) +��� + 1 +n +���ℓ(w(i) +t ; z′ +i,t) − ℓ(wt; z′ +i,t) +��� +ζ1≤ +∥∇ℓ(w(i) +t ; zi,t)∥ + ∥∇ℓ(wt; z′ +i,t)∥ +n +∥w(i) +t +− wt∥ +ζ2≤ +� +2Lℓ(w(i) +t ; zi,t) + +� +2Lℓ(wt; z′ +i,t) +n +∥w(i) +t +− wt∥, +where “ζ1” is due to the convexity of loss and in “ζ2”we have used Lemma 2. +The bound in +Lemma 1 implies +Ft(w(i) +t ) − Ft(wt) ≥ γt +2 ∥w(i) +t +− wt∥2. +Combining the preceding two inequalities yields +γt +2 ∥w(i) +t +− wt∥ ≤ +� +2Lℓ(w(i) +t ; zi,t) + +� +2Lℓ(wt; z′ +i,t) +n +, +27 + +which immediately gives +∥w(i) +t +− wt∥ ≤ +2 +�� +2Lℓ(w(i) +t ; zi,t) + +� +2Lℓ(wt; z′ +i,t) +� +γtn +. +(8) +Let us now consider the following population risk and empirical risk over St with respect to the +loss function ℓ: +Rℓ(w) := E(x,y)∼D[ℓ(w; z)], +Rℓ +St(w) := 1 +n +n +� +i=1 +ℓ(w; zi,t). +Since St and S(i) +t +are both i.i.d. samples of the data distribution. It follows that +ESt +� +Rℓ(wt) | Ft−1 +� += ESt∪{z′ +i,t} +� +ℓ(wt; z′ +i,t) | Ft−1 +� +=ES(i) +t +� +Rℓ(w(i) +t ) | Ft−1 +� += ES(i) +t +∪{zi,t} +� +ℓ(w(i) +t ; zi,t) | Ft−1 +� +. +Since the above holds for all i = 1, ..., n, we can further show that +ESt +� +Rℓ(wt) | Ft−1 +� += 1 +n +n +� +i=1 +ES(i) +t +∪{zi,t} +� +ℓ(w(i) +t ; zi,t) | Ft−1 +� += 1 +n +n +� +i=1 +ESt∪{z′ +i,t} +� +ℓ(w(i) +t ; zi,t) | Ft−1 +� += 1 +n +n +� +i=1 +ESt∪{z′ +i,t} +� +ℓ(wt; z′ +i,t) | Ft−1 +� += 1 +n +n +� +i=1 +ES(i) +t +∪{zi,t} +� +ℓ(wt; z′ +i,t) | Ft−1 +� +. +(9) +Regarding the empirical case, we find that +ESt +� +Rℓ +St(wt) | Ft−1 +� += 1 +n +n +� +i=1 +ESt [ℓ(wt; zi,t) | Ft−1] = 1 +n +n +� +i=1 +ESt∪{z′ +i,t} [ℓ(wt; zi,t) | Ft−1] . +28 + +Combining the preceding two equalities gives that +|ESt [R(wt) − RSt(wt) | Ft−1]| += +���ESt +� +Rℓ(wt) − Rℓ +St(wt) | Ft−1 +���� += +����� +1 +n +n +� +i=1 +ESt∪{z′ +i,t} +� +ℓ(w(i) +t ; zi,t) − ℓ(wt; zi,t) | Ft−1 +������ +≤ 1 +n +n +� +i=1 +ESt∪{z′ +i,t} +����ℓ(w(i) +t ; zi,t) − ℓ(wt; zi,t) +��� | Ft−1 +� +≤ 1 +n +n +� +i=1 +ESt∪{z′ +i,t} +�� +2Lℓ(w(i) +t ; zi,t)∥w(i) +t +− wt∥ | Ft−1 +� +(8) +≤ 1 +n +n +� +i=1 +ES(i) +t +∪{zi,t} + +4Lℓ(w(i) +t ; zi,t) +γtn ++ +4L +� +ℓ(w(i) +t ; zi,t)ℓ(wt; z′ +i,t) +γtn +| Ft−1 + + +ζ1≤ +� L +γtn +� 1 +n +n +� +i=1 +ESt∪{z′ +i,t} +� +6ℓ(w(i) +t ; zi,t) + 2ℓ(wt; z′ +i,t) | Ft−1 +� +(9) += 8L +γtnESt +� +Rℓ(wt) | Ft−1 +� +≤ 8L +γtnESt [R(wt) | Ft−1] , +where in “ζ1” we have used the fact a2 + b2 ≥ 2ab and the last inequality is due to the fact r ≥ 0. +Let us now denote w∗ +t = arg minw∈W ∗ ∥w − wt∥. 
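As an aside, the one-sample perturbation bound (8) established above can be checked numerically. The sketch below (synthetic data; the solver, step size, and problem sizes are illustrative assumptions, r ≡ 0, and the two inner problems are only solved to high precision by gradient descent rather than exactly) compares ∥w_t − w_t^(i)∥ with the right-hand side of (8) for a logistic minibatch proximal ERM whose data sets differ in a single example. In typical runs the measured distance is well below the bound, reflecting the slack in the worst-case argument.

    import numpy as np

    def prox_erm(X, y, w_prev, gamma, steps=5000):
        # gradient descent on (1/n) sum_i log(1+exp(-y_i x_i^T w)) + (gamma/2)*||w - w_prev||^2
        n, _ = X.shape
        L_smooth = np.max(np.sum(X * X, axis=1)) / 4.0
        lr = 1.0 / (L_smooth + gamma)
        w = w_prev.copy()
        for _ in range(steps):
            z = y * (X @ w)
            grad = X.T @ (-y / (1.0 + np.exp(z))) / n + gamma * (w - w_prev)
            w -= lr * grad
        return w

    rng = np.random.default_rng(1)
    n, p, gamma = 50, 10, 1.0
    X = rng.normal(size=(n, p))
    y = rng.choice([-1.0, 1.0], size=n)
    w_prev = rng.normal(size=p)
    L = np.max(np.sum(X * X, axis=1)) / 4.0                  # smoothness constant of individual losses

    X2, y2 = X.copy(), y.copy()
    X2[0], y2[0] = rng.normal(size=p), rng.choice([-1.0, 1.0])   # replace the single sample z_1 by z_1'

    w_t = prox_erm(X, y, w_prev, gamma)                      # minimizer over S_t
    w_ti = prox_erm(X2, y2, w_prev, gamma)                   # minimizer over S_t^(1)

    loss_i = np.log1p(np.exp(-y[0] * (X[0] @ w_ti)))         # ell(w_t^(1); z_1)
    loss_ip = np.log1p(np.exp(-y2[0] * (X2[0] @ w_t)))       # ell(w_t; z_1')
    rhs = 2.0 * (np.sqrt(2 * L * loss_i) + np.sqrt(2 * L * loss_ip)) / (gamma * n)
    print(f"||w_t - w_t^(1)|| = {np.linalg.norm(w_t - w_ti):.3e}   bound (8) = {rhs:.3e}")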
Conditioned on Ft−1, taking expectation on +both sides of the bound in Lemma 1 for w = w∗ +t−1 yields +ESt [RSt(wt) − R∗ | Ft−1] +≤γt +2 ESt +� +∥w∗ +t−1 − wt−1∥2 − ∥w∗ +t−1 − wt∥2 − ∥wt − wt−1∥2 | Ft−1 +� +≤γt +2 +� +∥w∗ +t−1 − wt−1∥2 − ESt +� +∥w∗ +t − wt∥2 | Ft−1 +�� +. +Combining the preceding two inequalities yields +ESt [R(wt) − R∗ | Ft−1] +=ESt [R(wt) − RSt(wt) + RSt(wt) − R∗ | Ft−1] +≤ |ESt [R(wt) − RSt(wt) | Ft−1]| + ESt [RSt(wt) − R∗ | Ft−1] +≤γt +2 +� +∥w∗ +t−1 − wt−1∥2 − ESt +� +∥w∗ +t − wt∥2 | Ft−1 +�� ++ 8L +γtnESt [R(wt) | Ft−1] +=γt +2 +� +∥w∗ +t−1 − wt−1∥2 − ESt +� +∥w∗ +t − wt∥2 | Ft−1 +�� ++ 8L +γtnESt [R(wt) − R∗ | Ft−1] + 8L +γtnESt [R∗] +≤γt +2 +� +∥w∗ +t−1 − wt−1∥2 − ESt +� +∥w∗ +t − wt∥2 | Ft−1 +�� ++ 1 +2ESt [R(wt) − R∗ | Ft−1] + 8L +γtnR∗, +where in the last inequality we have used the condition γt ≥ 52L +n . After rearranging the terms in +29 + +the above inequality we obtain +ESt [R(wt) − R∗ | Ft−1] ≤γt +� +∥w∗ +t−1 − wt−1∥2 − ESt +� +∥w∗ +t − wt∥2 | Ft−1 +�� ++ 16L +γtn R∗ +=γt +� +D2(wt−1, W ∗) − ESt +� +D2(wt, W ∗) | Ft−1 +�� ++ 16L +γtn R∗. +This implies the desired bound. +The following lemma is a direct consequence of Lemma 3. +Lemma 4. Suppose that the Assumptions 1 holds. Set γt ≥ 16L +n . Then the following holds for all +t ≥ 1: +E +� +D2(wt, W ∗) +� +≤ D2(w0, W ∗) + +t +� +τ=1 +16L +γ2τ nR∗. +Proof. Since R(wt) ≥ R∗ and γt ≥ 52L +n , the bound in Lemma 3 immediately implies that +ESt +� +D2(wt, W ∗) | Ft−1 +� +≤ D2(wt−1, W ∗) + 16L +γ2 +t nR∗. +(10) +By unfolding the above recurrent from time instance t to zero we obtain that for all t ≥ 1, +E +� +D2(wt, W ∗) +� +≤ D2(w0, W ∗) + +t +� +τ=1 +16L +γ2τ nR∗. +This proves the desired bound. +With all these lemmas in place, we are now ready to prove the main result in Theorem 1. +Proof of Theorem 1. Part (a): Note that the condition on n implies γt = +λρt +4 +≥ +λρ +4 +≥ +16L +n . +Applying Lemma 3 along with the condition R(wt) − R∗ ≥ λ +2D2(wt, W ∗) yields +(1 − ρ)E [R(wt) − R∗ | Ft−1] +≤γtD2(wt−1, W ∗) − +� +γt + λρ +2 +� +E +� +D2(wt, W ∗) | Ft−1 +� ++ 24L +γtn R∗ +≤λρt +4 D2(wt−1, W ∗) − λρ(t + 2) +4 +E +� +D2(wt, W ∗) | Ft−1 +� ++ 26L +λρtnR∗ +≤λρt +4 D2(wt−1, W ∗) − λρ(t + 2) +4 +E +� +D2(wt, W ∗) | Ft−1 +� ++ +27L +λρ(t + 1)nR∗, +where in the last inequality we have used 1 +t ≤ +2 +t+1 for t ≥ 1. The above inequality implies +tE [R(wt) − R∗ | Ft−1] +≤(t + 1)E [R(wt) − R∗ | Ft−1] +≤λρt(t + 1) +4(1 − ρ) D2(wt−1, W ∗) − λρ(t + 1)(t + 2) +4(1 − ρ) +E +� +D2(wt, W ∗) | Ft−1 +� ++ +27L +λnρ(1 − ρ)R∗. +30 + +Then based on the law of total expectation and after proper rearrangement we obtain +tE [R(wt) − R∗] +≤λρt(t + 1) +4(1 − ρ) E +� +D2(wt−1, W ∗) +� +− λρ(t + 1)(t + 2) +4(1 − ρ) +E +� +D2(wt, W ∗) +� ++ +27L +λnρ(1 − ρ)R∗. +(11) +By summing the above inequality from t = 1, ..., T and after normalization we obtain +2 +T(T + 1) +T +� +t=1 +tE [R(wt) − R∗] ≤ +λρ +T(T + 1)(1 − ρ)D2(w0, W ∗) + +28L +λρ(1 − ρ)(T + 1)nR∗ +≤ +2λρ +T(T + 1)D2(w0, W ∗) + +29L +λρ(T + 1)nR∗, +where in the last inequality we have used ρ ≤ 0.5. Consider the weighted output ¯wT = +2 +T(T+1) +�T +t=1 twt. +In view of the above inequality and the convexity and quadratic growth property of the risk function +R we have +E [R( ¯wT ) − R∗] ≤ 4ρ [R(w0) − R∗] +T(T + 1) ++ +29L +λρn(T + 1)R∗, +which then implies the desired bound in part (a). +Part (b): Note that γt = λρt +4 + 16L +n +≥ 16L +n +for all t ≥ 1. 
According to Lemma 4 we have the +following holds for all t ≥ 1: +E +� +D2(wt, W ∗) +� +≤D2(w0, W ∗) + +t +� +τ=1 +16L +γ2τ nR∗ ≤ D2(w0, W ∗) + +28L +λ2ρ2nR∗ +t +� +τ=1 +1 +τ 2 ≤ D2(w0, W ∗) + +29L +λ2ρ2nR∗. +(12) +Similar to the argument in part (a), applying Lemma 3 along with the quadratic growth condition +R(wt) − R∗ ≥ λ +2D2(wt, W ∗) and ρ ≤ 0.5 yields +1 +2E [R(wt) − R∗ | Ft−1] +≤(1 − ρ)E [R(wt) − R∗ | Ft−1] +≤γtD2(wt−1, W ∗) − +� +γt + λρ +2 +� +E +� +D2(wt, W ∗) | Ft−1 +� ++ 24L +γtn R∗ +≤λρt +4 D2(wt−1, W ∗) − λρ(t + 2) +4 +E +� +D2(wt, W ∗) | Ft−1 +� ++ 16L +n +� +D2(wt−1, W ∗) − E +� +D2(wt, W ∗) | Ft−1 +�� ++ 26L +λρtnR∗, +where in the second inequality we have used γt ≥ 52L +n , and in the last inequality we have used +31 + +γt ≥ λρt +4 . Then based on the law of total expectation and after proper rearrangement we have +E [R(wt) − R∗] +≤λρt +2 E +� +D2(wt−1, W ∗) +� +− λρ(t + 2) +2 +E +� +D2(wt, W ∗) +� ++ 25L +n +� +E +� +D2(wt−1, W ∗) +� +− E +� +D2(wt, W ∗) +�� ++ 27L +λtnρR∗, +which implies that +tE [R(wt) − R∗] +≤(t + 1)E [R(wt) − R∗] +≤λρt(t + 1) +2 +E +� +D2(wt−1, W ∗) +� +− λρ(t + 1)(t + 2) +2 +E +� +D2(wt, W ∗) +� ++ 25L(t + 1) +n +� +E +� +D2(wt−1, W ∗) +� +− E +� +D2(wt, W ∗) +�� ++ 27L(t + 1) +λtnρ +R∗ +≤λρt(t + 1) +2 +E +� +D2(wt−1, W ∗) +� +− λρ(t + 1)(t + 2) +2 +E +� +D2(wt, W ∗) +� ++ 26Lt +n +� +E +� +D2(wt−1, W ∗) +� +− E +� +D2(wt, W ∗) +�� ++ 28L +λnρR∗, +where in the last inequality we have used the fact t + 1 ≤ 2t as t ≥ 1. By summing the above +inequality from t = 1, ..., T and after normalization we obtain +2 +T(T + 1) +T +� +t=1 +tE [R(wt) − R∗] +≤ +2λρ +T(T + 1)D2(w0, W ∗) + +27L +nT(T + 1) +T +� +t=1 +D2(wt−1, W ∗) + +29L +λρ(T + 1)nR∗ +≤ +2λρ +T(T + 1)D2(w0, W ∗) + +27L +nT(T + 1) +T +� +t=1 +� +D2(w0, W ∗) + +29L +λ2ρ2nR∗ +� ++ +29L +λρ(T + 1)nR∗ += +� +2λρ +T(T + 1) + +27L +n(T + 1) +� +D2(w0, W ∗) + +� +216L2 +λ2ρ2n2(T + 1) + +29L +λρn(T + 1) +� +R∗, +where in the last inequality we have used (12). Using the convexity and quadratic growth property +in the above inequality yields +E [R( ¯wT ) − R∗] ≤ +� +4ρ +T(T + 1) + +28L +λn(T + 1) +� +[R(w0) − R∗] + +� +216L2 +λ2ρ2n2(T + 1) + +29L +λρn(T + 1) +� +R∗, +which then implies the desired bound in part (b). The proof is concluded. +32 + +A.2 +Proof of Theorem 2 +In this subsection we prove Theorem 2 which is restated below. +Theorem 2. Suppose that Assumptions 1 and 2 hold. +Consider ǫt ≡ 0 for implementing M- +SPP in both Phase-I and Phase-II of Algorithm 2. Consider the weighted average output ¯wT = +2 +(T−1)(T+2) +�T +t=2 twt in Phase-II. +(a) Suppose that n ≥ 128L +λ . Set m = 128L +λ +in Phase-I and γt = λt +8 for implementing M-SPP in +both Phase-I and Phase II. Then for any T ≥ 2, ¯wT satisfies +E [R( ¯wT ) − R∗] ≲ L2 [R(w0) − R∗] +λ2n2T 2 ++ +L +λnT R∗. +(b) Set m = O(1) in Phase-I and γt = λt +8 + 16L +n +for implementing M-SPP in both Phase-I and +Phase-II. Then for any T ≥ 2, ¯wT satisfies +E [R( ¯wT ) − R∗] ≲ L2 [R(w0) − R∗] +λ2nT ++ +L3 +λ3nT R∗. +Proof. Part (a): In Phase-I, by invoking the first part of Theorem 1 with ρ = 1/2 and T = n/m ≥ 1 +(with slight abuse of notation) we get immediately that +ES1 [R(w1) − R∗] ≤ 2m2 [R(w0) − R∗] +n2 ++ 210L +λn R∗. 
+(13) +In Phase-II, conditioned on F1, summing the recursion form (11) from t = 2, ..., T with ρ = 1/2 +and proper normalization yields +2 +(T − 1)(T + 2) +T +� +t=2 +tES2:t [R(wt) − R∗ | F1] +≤ 6λD2(w1, W ∗) +(T − 1)(T + 2) + +210L +λn(T + 2)R∗ ≤ 3 (R(w1) − R∗) +(T − 1)(T + 2) + +210L +λn(T + 2)R∗, +where in the last inequality we have used the quadratic growth property. Consider the weighted av- +erage output ¯wT = +2 +(T−1)(T+2) +�T +t=2 twt. Based on the above inequality and law of total expectation +we must have +E [R( ¯wT ) − R∗] ≤6ES1 [R(w1) − R∗] +(T − 1)(T + 2) ++ +210L +λn(T + 2)R∗ +≤6ES1 [R(w1) − R∗] +T 2 ++ 2102L +λnT R∗ +≤12m2 [R(w0) − R∗] +n2T 2 ++ 213L +λnT R∗ +≤222L2 [R(w0) − R∗] +λ2n2T 2 ++ 213L +λnT R∗, +where we have used the fact T ≥ 2 in multiple places and in the last but one step we have used (13). +This immediately implies the desired bound in Part (a). +33 + +Part (b): In Phase-I, by applying second part of Theorem 1 (with ρ = 1/2 and T = n/m ≥ 1) +and preserving the leading terms we obtain that +ES1 [R(w1) − R∗] ≲ +�m2 +n2 + L +λn +� +[R(w0) − R∗] + +� +L2 +λ2mn + L +λn +� +R∗ +≲ L +λn[R(w0) − R∗] + L2 +λ2nR∗. +(14) +In Phase-II, based on the proof argument of the part (b) of Theorem 1 we can show that the +weighted average output ¯wT = +2 +(T−1)(T+2) +�T +t=2 twt satisfies +E [R( ¯wT ) − R∗] ≲ +� 1 +T 2 + +L +λnT +� +ES1 [R(w1) − R∗] + +� +L2 +λ2n2T + +L +λnT +� +R∗ +≲ +� +L +λnT 2 + +L2 +λ2n2T +� +[R(w0) − R∗] + +� +L3 +λ3n2T + +L2 +λ2nT +� +R∗ +≲ L2 +λ2nT [R(w0) − R∗] + +L3 +λ3nT R∗, +where in the second step we have used (14). This proves the desired bound in Part (b). +A.3 +Proof of Theorem 3 +In this subsection, we prove Theorem 3 as following restated. +Theorem 3. Suppose that Assumption 1 holds. Set γt ≡ γ ≥ 16L +n . Let ¯wT = +1 +T +�T +t=1 wt be the +average output of Algorithm 1. Then +E [R( ¯wT ) − R∗] ≲ γ +T D2(w0, W ∗) + L +γnR∗. +Particularly for γ = +� +T +n + 16L +n , it holds that +E [R( ¯wT ) − R∗] ≲ +� +1 +√ +nT ++ L +nT +� +D2(w0, W ∗) + +L +√ +nT +R∗. +Proof. Since γt ≡ γ ≥ 16L +n , the bound in Lemma 3 is valid. Based on law of total expectation and +by summing that inequality from t = 1, ..., T with proper normalization we obtain +1 +T +T +� +t=1 +E [R(wt) − R∗] ≤ γ +T D2(w0, W ∗) + 16L +γn R∗. +Consider ¯wT = 1 +T +�T +t=1 wt. In view of the above inequality and convexity of R we have +E [R( ¯wT ) − R∗] ≤ γ +T D2(w0, W ∗) + 16L +γn R∗. +This proves the first desired bound. The second bound follows immediately by substituting γ = +� +T +n + 16L +n +> 16L +n +into the above bound. The proof is concluded. +34 + +A.4 +On the (Iteration) Stability of M-SPP +In this appendix subsection, we further provide a sensitivity analysis of M-SPP to the choice of reg- +ularization modulus {γt}t≥1, under the following notion of iteration stability essentially introduced +by Asi and Duchi (2019a,b). +Definition 2. A stochastic optimization algorithm generating iterates {wt}t≥1 for minimizing the +population risk R(w) is staid to be stable if +sup +t≥1 +D(wt, W ∗) < ∞, +with probability 1. +Before presenting the main results on the iteration stability of M-SPP, we first recall the +Robbins-Siegmund nonnegative almost supermartingale convergence lemma which is typically used +for establishing the stability and convergence of stochastic optimization methods including SPP (Asi and Duchi, +2019b). +Lemma 5 (Robbins and Siegmund (1971)). 
Consider four sequences of nonnegative random vari- +ables {Ut}, {Vt}, {αt}, {βt} that are measurable over a filtration {Ft}t≥0. Suppose that � +t αt < ∞, +� +t βt < ∞, and +E[Ut+1 | Ft] ≤ (1 + αt)Ut + βt − Vt. +Then there exits U∞ such that Ut +a.s. +−−→ U∞ and � +t Vt < ∞ with probability 1. +The following proposition shows that the sequence of estimation error {∥wt − w∗∥} is non- +divergent in expectation and it converges to some finite value and is bounded with probability 1. +Proposition 2. Suppose that the Assumptions 1 holds. Assume that γt ≥ 16L +n and � +t≥1 Lγ−2 +t +< ∞. +Then we have the following hold: +(a) E [D(wt, W ∗)] < ∞; +(b) D(wt, W ∗) converges to some finite value and supt≥1 D(wt, W ∗) < ∞ with probability 1. +Proof. Applying Lemma 4 yields that for all t ≥ 1 +E +� +D2(wt, W ∗) +� +≲ D2(w0, W ∗) + +t +� +τ=1 +L +γ2τnR∗ < ∞, +where we have used the given conditions on γt. This proves the part (a). To show the part (b), +invoking Lemma 5 with αt = Vt ≡ 0 and βt = 16L +γ2 +t nR∗ to (10) yields D(wt, W ∗) converges to some +finite value and thus supt≥1 D(wt, W ∗) < ∞ almost surely. +Remark 12. Proposition 2 shows that in contrast to minibatch SGD, the choice of γt in M-SPP +is insensitive to the gradient scale of loss functions for generating a non-divergent sequence of +estimation errors. +35 + +B +Proofs for the Results in Section 3 +In this section, we present the technical proofs for the main results stated in Section 3. +B.1 +Proof of Theorem 4 +In this subsection, we prove Theorem 4 which is restated below. +Theorem 4. Suppose that Assumptions 1, 2 and 3 hold. Let ρ ∈ (0, 1/4] be an arbitrary scalar +and set γt = λρt +4 . Suppose that n ≥ 76L +λρ . Assume that ǫt ≤ +ǫ +nt4 for some ǫ ∈ [0, 1]. Then for any +T ≥ 1, the weighted average output ¯wt = +2 +T(T+1) +�T +t=1 twt of Algorithm 1 satisfies +E [R( ¯wt) − R∗] ≲ ρ +T 2 (R(w0) − R∗) + +L +λρnT R∗ + +√ǫ +T 2 +� L +λρ + G +� 1 +λρ +� +. +Preliminaries. In what follows, we denote by ˜wt := arg minw∈W Ft(w) the exact solution of the +inner-loop minibatch ERM optimization, which plays the same role as wt in Section 2. We first +present the following lemma that upper bounds the discrepancy between the inexact minimizer wt +and the exact minimizer ˜wt. +Lemma 6. Assume that the loss function ℓ is convex with respect to its first argument and r is +convex. Then for any w ∈ W, we have +∥wt − ˜wt∥ ≤ +� +2ǫt +γt +. +Proof. Using arguments identical to those of Lemma 1 we can show that for all w ∈ W, +RSt( ˜wt) − RSt(w) ≤ γt +2 +� +∥w − wt−1∥2 − ∥w − ˜wt∥2 − ∥ ˜wt − wt−1∥2� +. +(15) +Setting w = wt in the above yields +γt +2 ∥wt − ˜wt∥2 ≤ Ft(wt) − Ft( ˜wt) ≤ ǫt, +which directly implies ∥wt − ˜wt∥ ≤ +� +2ǫt/γt. This proves the second desired bound. +The following lemma as an extension of Lemma 3 to the inexact setting. +Lemma 7. Suppose that the Assumptions 1, 2 and 3 hold. +Assume that γt ≥ +19L +n . Then the +following bound holds for any ρ ∈ (0, 1): +E [R(wt) − R∗ | Ft−1] ≤γt +� +D2(wt−1, W ∗) − E +�� +1 − ρλ +2γt +� +D2(wt, W ∗) | Ft−1 +�� ++ 19L +γtn R∗ + +� +3n + 4γt +ρλ +� +ǫt + 3G +� +2ǫt +γt +. +36 + +Alternatively, for any w∗ ∈ W ∗, under Assumptions 1 and 3 we have +E [R(wt) − R∗ | Ft−1] ≤γt +� +∥wt−1 − w∗∥2 − E +� +∥wt − w∗∥2 | Ft−1 +�� ++ 19L +γtn R∗ ++ 3nǫt + +� +2 +� +2γtE [∥wt − w∗∥ | Ft−1] + 3G +� +2 +γt +� √ǫt. +Proof. 
Let us decompose E [R(wt) − R∗ | Ft−1] into the following three terms: +E [R(wt) − R∗ | Ft−1] += E [R(wt) − R( ˜wt) | Ft−1] +� +�� +� +A ++ E [R( ˜wt) − RSt( ˜wt) | Ft−1] +� +�� +� +B ++ E [RSt( ˜wt) − R∗ | Ft−1] +� +�� +� +C +. +We next bound these three terms respectively. To bound the term A, we can show that +|A| := |E [R(wt) − R( ˜wt) | Ft−1] | += +���E +� +Rℓ(wt) − Rℓ( ˜wt) | Ft−1 +� ++ E [r(wt) − r( ˜wt)] | Ft−1 +��� +≤E [Ez|ℓ(wt; z) − ℓ( ˜wt; z)| | Ft−1] + E [|r(wt) − r( ˜wt)| | Ft−1] +ζ1≤E +� +Ez +�� +2Lℓ(wt; z)∥wt − ˜wt∥ +� +| Ft−1 +� ++ E [G∥wt − ˜wt∥ | Ft−1] +≤E +� +Ez +� L +γtnℓ(wt; z) + γtn +2 ∥wt − ˜wt∥2 +� +| Ft−1 +� ++ E [G∥wt − ˜wt∥ | Ft−1] +=E +� L +γtnRℓ(wt) | Ft−1 +� ++ ESt +�γtn +2 ∥wt − ˜wt∥2 + G∥wt − ˜wt∥ | Ft−1 +� +≤E +� L +γtnR(wt) | Ft−1 +� ++ nǫt + G +�2ǫt +γt +, +where in “ζ1” we have used the convexity of loss and Lemma 2 and the Assumption 3 and in the +last inequality we have used r > 0 and the perturbation bound of Lemma 6. +To bound the term B, using about the same proof arguments as for Lemma 3 we can show that +B :=E [R( ˜wt) − RSt( ˜wt) | Ft−1] +≤ 8L +γtnE [R( ˜wt) | Ft−1] += 8L +γtnE [R( ˜wt) − R(wt)] + 8L +γtnE [R(wt) | Ft−1] +≤1 +2|A| + 8L +γtnE [R(wt) | Ft−1] , +where we have used the condition on minibatch size γt. +To bound the term C, based on the definition of ˜wt and by invoking Lemma 1 with w = w∗ +t−1 +37 + +we can verify that +C :=E [RSt( ˜wt) − R∗ | Ft−1] +≤γt +2 E +� +∥w∗ +t−1 − wt−1∥2 − ∥w∗ +t−1 − ˜wt∥2 − ∥ ˜wt − wt−1∥2 | Ft−1 +� +≤γt +2 E +� +∥w∗ +t−1 − wt−1∥2 − ∥w∗ +t−1 − ˜wt∥2 | Ft−1 +� +=γt +2 +� +∥w∗ +t−1 − wt−1∥2 − E +� +∥w∗ +t−1 − wt + wt − ˜wt∥2 | Ft−1 +�� +=γt +2 +� +∥w∗ +t−1 − wt−1∥2 − E +� +∥w∗ +t−1 − wt∥2 + 2⟨w∗ +t−1 − wt, wt − ˜wt⟩ + ∥wt − ˜wt∥2 | Ft−1 +�� +≤γt +2 +� +∥w∗ +t−1 − wt−1∥2 − E +�� +1 − ρλ +2γt +� +∥w∗ +t−1 − wt∥2 − 2γt +ρλ ∥wt − ˜wt∥2 | Ft−1 +�� +≤γt +2 +� +∥w∗ +t−1 − wt−1∥2 − E +�� +1 − ρλ +2γt +� +∥w∗ +t − wt∥2 | Ft−1 +�� ++ 2γtǫt +ρλ +=γt +2 +� +D2(wt−1, W ∗) − E +�� +1 − ρλ +2γt +� +D2(wt, W ∗) | Ft−1 +�� ++ 2γtǫt +ρλ . +Combining the above three bounds yields +E [R(wt) − R∗ | Ft−1] = A + B + C +≤3 +2|A| + 8L +γtnE [R(wt) | Ft−1] ++ γt +2 +� +D2(wt−1, W ∗) − E +�� +1 − ρλ +2γt +� +D2(wt, W ∗) | Ft−1 +�� ++ 2γtǫt +ρλ +≤E +� 3L +2γtnR(wt) | Ft−1 +� ++ 3n +2 ǫt + 3G +2 +�2ǫt +γt ++ 8L +γtnE [R(wt) | Ft−1] ++ γt +2 +� +D2(wt−1, W ∗) − E +�� +1 − ρλ +2γt +� +D2(wt, W ∗) | Ft−1 +�� ++ 2γtǫt +ρλ +≤γt +2 +� +D2(wt−1, W ∗) − E +�� +1 − ρλ +2γt +� +D2(wt, W ∗) | Ft−1 +�� ++ 9.5L +γtn E [R(wt) | Ft−1] ++ +�3n +2 + 2γt +ρλ +� +ǫt + 3G +2 +�2ǫt +γt +=γt +2 +� +D2(wt−1, W ∗) − E +�� +1 − ρλ +2γt +� +D2(wt, W ∗) | Ft−1 +�� ++ 9.5L +γtn E [R∗] + 9.5L +γtn E [R(wt) − R∗ | Ft−1] ++ +�3n +2 + 2γt +ρλ +� +ǫt + 3G +2 +�2ǫt +γt +≤γt +2 +� +D2(wt−1, W ∗) − E +�� +1 − ρλ +2γt +� +D2(wt, W ∗) | Ft−1 +�� ++ 9.5L +γtn E [R∗] + 1 +2E [R(wt) − R∗ | Ft−1] ++ +�3n +2 + 2γt +ρλ +� +ǫt + 3G +2 +� +2ǫt +γt +, +where in the last inequality we have used the condition γt ≥ 19L +n . After rearranging the terms in +the above inequality we obtain the first desired bound. +38 + +To derive the second bound, for any fixed w∗ ∈ W ∗, we note that the term C can be alternatively +bounded as +C ≤γt +2 +� +∥w∗ − wt−1∥2 − E +� +∥w∗ − wt∥2 + 2⟨w∗ − wt, wt − ˜wt⟩ + ∥wt − ˜wt∥2 | Ft−1 +�� +≤γt +2 +� +∥w∗ − wt−1∥2 − E +� +∥w∗ − wt∥2 − 2∥wt − w∗∥∥wt − ˜wt∥ | Ft−1 +�� +≤γt +2 +� +∥w∗ − wt−1∥2 − E +� +∥w∗ − wt∥2 | Ft−1 +�� ++ +� +2γtǫtE [∥w∗ − wt∥ | Ft−1] . 
+Similar to the proof of the first bound, we can derive that +ESt [R(wt) − R∗ | Ft−1] = A + B + C +≤3 +2|A| + 8L +γtnE [R(wt) | Ft−1] + γt +2 +� +∥w∗ − wt−1∥2 − E +� +∥w∗ − wt∥2 | Ft−1 +�� ++ +� +2γtǫtE [∥w∗ − wt∥ | Ft−1] +≤γt +2 +� +∥w∗ − wt−1∥2 − E +� +∥w∗ − wt∥2 | Ft−1 +�� ++ 9.5L +γtn R∗ ++ 1 +2E [R(wt) − R∗ | Ft−1] + 3n +2 ǫt + +� +2γtǫtE [∥w∗ − wt∥ | Ft−1] + 3G +2 +� +2ǫt +γt +. +After rearranging the terms in the above inequality we obtain the second desired bound. +With the above preliminary results in hand, we are now in the position to prove the main result +of Theorem 4. +Proof of Theorem 4. Since by assumption R(wt) − R∗ ≥ λ +2 D2(wt, W ∗) and γt = λρt +4 +≥ λρ +4 ≥ 19L +n , +based on the first bound in Lemma 7 we can show that +(1 − 2ρ)E [R(wt) − R∗ | Ft−1] +≤γtD2(wt−1, W ∗) − +� +γt + ρλ +2 +� +E +� +D2(wt, W ∗) | Ft−1 +� ++ 19L +γtn R∗ + +� +3n + 4γt +ρλ +� +ǫt + 3G +�2ǫt +γt +≤λρt +4 D2(wt−1, W ∗) − ρλ(t + 2) +4 +E +� +D2(wt, W ∗) | Ft−1 +� ++ 76L +λρntR∗ + (3n + t) ǫt + 6G +� +2ǫt +λρt. +Now suppose that ǫt ≤ +ǫ +nt4 for some ǫ ∈ [0, 1]. Since ρ ≤ 1/4, the above implies +E [R(wt) − R∗ | Ft−1] +≤λρt +2 D2(wt−1, W ∗) − ρλ(t + 2) +2 +E +� +D2(wt, W ∗) | Ft−1 +� ++ 152L +λρnt R∗ + +� 6 +t4 + 2 +t3 + 12G +� +2 +λρt5 +� √ǫ. +The above inequality then implies +tE [R(wt) − R∗ | Ft−1] ≤(t + 1)E [R(wt) − R∗ | Ft−1] +≤λρt(t + 1) +2 +D2(wt−1, W ∗) − λρ(t + 1)(t + 2) +2 +E +� +D2(wt, W ∗) | Ft−1 +� ++ 304L +λρn R∗ + +�12 +t3 + 4 +t2 + 24G +t +� 2 +λρt +� √ǫ, +39 + +where we have used the fact t+1 +t +≤ 2 for t ≥ 1. In view of the law of total expectation, summing +the above inequality from t = 1, ..., T with natural normalization yields +2 +T(T + 1) +T +� +t=1 +tE [R(wt) − R∗] +≤ +2λρ +T(T + 1)D2(w0, W ∗) + +608L +λρ(T + 1)nR∗ + +√ǫ +T(T + 1) +� +64 + 192G +� +2 +λρ +� +≤ +4ρ +T(T + 1)(R(w0) − R∗) + +608L +λρ(T + 1)nR∗ + +√ǫ +T(T + 1) +� +64 + 192G +� +2 +λρ +� +, +which then immediately leads to the desired bound. The proof is concluded. +B.2 +Proof of Theorem 5 +In this subsection, we prove Theorem 5 as following restated. +Theorem 5. Suppose that Assumptions 1 and 3 hold. +Set γt ≡ γ ≥ +19L +n . +Assume that ǫt ≤ +min +� +ǫ +n2t5 , 2G2 +9n2γ +� +for some ǫ ∈ [0, 1]. Then the average output ¯wT = +1 +T +�T +t=1 wt of Algorithm 1 +satisfies +E [R( ¯wT ) − R∗] ≲ γ +T D2(w0, W ∗) + L +γnR∗ + +� L +γn + +γ +LnT + +G +√γnT +� √ǫ. +Particularly for γ = +� +T +n + 19L +n , it holds that +E [R( ¯wT ) − R∗] ≲ +� +1 +√ +nT ++ L +nT +� +D2(w0, W ∗) + +L +√ +nT +R∗ + +�L + G +√ +nT ++ 1 +nT +� √ǫ. +The following lemma, which can be proved by induction (see, e.g., Schmidt et al., 2011), will +be used to prove the main result. +Lemma 8. Assume that the nonnegative sequence {uτ}τ≥1 satisfies the following recursion for all +t ≥ 1: +u2 +t ≤ St + +t +� +τ=1 +ατuτ, +with {Sτ}τ≥1 an increasing sequence, S0 ≥ u2 +0 and ατ ≥ 0 for all τ. Then, the following bound +holds for all t ≥ 1: +ut ≤ +� +St + +t +� +τ=1 +ατ. +40 + +The following lemma gives an upper bound on the expected estimation error E [∥w∗ +0 − wt∥]. +Lemma 9. Under the conditions of Theorem 5, the following bound holds for all t ≥ 1: +E [∥wt − w∗ +0∥] ≤ ∥w0 − w∗ +0∥ + +� t +γ R∗ + 6tG +γ . +Proof. Recall that w∗ +0 = arg minw∈W ∗ ∥w0 −w∥. Since γt ≡ γ ≥ 19L +n , the second bound in Lemma 7 +is valid. 
For any t ∈ [T], by summing that inequality with w∗ = w∗ +0 from τ = 1, ..., t we obtain +t +� +τ=1 +E [R(wτ) − R∗] + γE +� +∥wt − w∗ +0∥2� +≤γ∥w0 − w∗ +0∥2 + 19L +γn tR∗ + 3n +t +� +τ=1 +ǫτ + +t +� +τ=1 +� +2 +� +2γE [∥w∗ +0 − wτ∥] + 3G +� 2 +γ +� √ǫτ. +(16) +Dropping the non-negative term �t +τ=1 ES[τ] [R(wτ) − R∗] from the above inequality yields +E +� +∥wt − w∗ +0∥2� +� +�� +� +u2 +t +≤∥w0 − w∗ +0∥2 + 19L +γ2ntR∗ + 3n +γ +t +� +τ=1 +ǫτ + +t +� +τ=1 +� +2 +� +2 +γ E [∥w∗ +0 − wτ∥] + 3G +√ +2 +γ√γ +� +√ǫτ +ζ1≤∥w0 − w∗ +0∥2 + t +γ R∗ + +t +� +τ=1 +� +3n +γ ǫτ + 3G +√ +2 +γ√γ +√ǫτ +� ++ +t +� +τ=1 +� +2 +� +2ǫτ +γ +� +E [∥w∗ +0 − wτ∥2] +� +≤ ∥w0 − w∗ +0∥2 + t +γ R∗ + +t +� +τ=1 +4G√2ǫτ +γ√γ +� +�� +� +St ++ +t +� +τ=1 + + + +2 +� +2ǫτ +γ +� �� � +ατ +� +E [∥w∗ +0 − wτ∥2] +� +�� +� +uτ + + + + , +where in “ζ1” we have used γ ≥ +19L +n +and the basic inequality E2[X] ≤ E[X2], and in the last +inequality we have used the condition ǫτ ≤ 2G2 +9n2γ for all τ ≥ 1. By invoking Lemma 8 to the above +recursion form we can derive that for all t ≥ 1, +� +E [∥wt − w∗ +0∥2] ≤ +� +� +� +�∥w0 − w∗ +0∥2 + t +γ R∗ + +t +� +τ=1 +4G√2ǫτ +γ ++ +t +� +τ=1 +2 +�2ǫτ +γ +≤∥w0 − w∗ +0∥ + +� t +γ R∗ + +t +� +τ=1 +� +4G√2ǫτ +γ√γ ++ +t +� +τ=1 +2 +� +2ǫτ +γ +≤∥w0 − w∗ +0∥ + +� t +γ R∗ + 6Gt +γ , +where the last inequality is due to the condition ǫτ ≤ 2G2 +9γ for all τ ≥ 1. The above inequality then +directly implies the desired bound for all t ∈ [T]. +41 + +Now we are ready to prove the main result of Theorem 5. +Proof of Theorem 5. Dropping non-negative term γE +� +∥wt − w∗∥2� +in (16) followed by natural nor- +malization yields +1 +T +T +� +t=1 +E [R(wt) − R∗] +≤ γ +T ∥w0 − w∗ +0∥2 + 19L +γn R∗ + 3n +T +T +� +t=1 +ǫt + 1 +T +T +� +t=1 +� +2 +� +2γE [∥wt − w∗ +0∥] + 3G +� +2 +γ +� √ǫt +ζ1≤ γ +T ∥w0 − w∗ +0∥2 + 19L +γn R∗ + 3n +T +T +� +t=1 +ǫt ++ 1 +T +T +� +t=1 +� +2 +√ +2 +�√γ∥w0 − w∗ +0∥ + +√ +tR∗ + 6Gt +√γ +� ++ 3G +� 2 +γ +� √ǫt +ζ2≤ γ +T ∥w0 − w∗ +0∥2 + 19L +γn R∗ + 1 +T +T +� +t=1 +� +3nǫt + 2 +� +2γǫt∥w0 − w∗ +0∥ + 2 +� +2tR∗ǫt + 15√2ǫtGt +√γ +� +ζ3≤ γ +T ∥w0 − w∗ +0∥2 + 19L +γn R∗ ++ 1 +T +T +� +t=1 +� +3nǫt + γ∥w0 − w∗ +0∥2 +t2 ++ 2t2ǫt + 2LR∗ +γn ++ γntǫt +L ++ 15√2ǫtGt +√γ +� +≤3γ +T ∥w0 − w∗ +0∥2 + 21L +γn R∗ + 1 +T +T +� +t=1 +� +3nǫt + 2t2ǫt + γntǫt +L ++ 15√2ǫtGt +√γ +� +, +, +where “ζ1”follows from Lemma 9, “ζ2” is due to t ≥ 1 and “ζ3” is due to ab ≤ (a2 + b2)/2. Now +consider ǫt ≤ +ǫ +n2t5 for some ǫ ∈ [0, 1]. Then it follows from the preceding inequality that +1 +T +T +� +t=1 +E [R(wt) − R∗] +≤3γ +T ∥w0 − w∗ +0∥2 + 21L +γn R∗ + 1 +T +T +� +t=1 +� +3 +nt5 + 2 +nt3 + +γ +nLt4 + 15 +√ +2G +nt1.5√γ +� +√ǫ +≤3γ +T ∥w0 − w∗ +0∥2 + 21L +γn R∗ + 1 +T +� +6 +n + 4 +n + 2γ +nL + 45 +√ +2G +n√γ +� +√ǫ. +Let ¯wT = 1 +T +�T +t=1 wt. Combined with the convexity of R, the above inequality implies +E [R( ¯wT ) − R∗] ≲ γ +T D2(w0, W ∗) + L +γnR∗ + +� 1 +nT + +γ +LnT + +G +nT√γ +� √ǫ. +This proves the first bound. Substituting γ = +� +T +n + 19L +n +> 19L +n into the above bound and preserving +the leading terms yields the following second desired bound: +E [R( ¯wT ) − R∗] ≲ +� +1 +√ +nT ++ L +nT +� +D2(w0, W ∗) + +L +√ +nT +R∗ + +�L + G +√ +nT ++ 1 +nT +� √ǫ. +42 + +The proof is concluded. +C +Proofs for the Results in Section 4 +In this section, we present the proofs for the high probability estimation error bounds stated in +Section 4. +C.1 +Proof of Proposition 1 +In this subsection, we prove Proposition 1 as below restated . +Proposition 1. 
Suppose that Assumption 1 holds and the loss function is bounded such that 0 ≤ +ℓ(y′, y) ≤ M for all y, y′. Let S = {St}t∈[T] and S′ = {S′ +t}t∈[T] be two sets of data minibatches +satisfying S .= S′. Then +(a) The weighted average output ¯wT and ¯w′ +T respectively generated by M-SPP (Algorithm 1) over +S and S′ satisfy +sup +S,S′ ∥ ¯wT − ¯w′ +T ∥ ≤ +4 +√ +2LM +n mint∈[T] γt ++ +T +� +t=1 +2 +�2ǫt +γt +. +(b) The weighted average output ¯wT and ¯w′ +T respectively generated by M-SPP-SWoR (Algo- +rithm 3) over S and S′ satisfy +sup +S,S′ Eξ[T ] +� +∥ ¯wT − ¯w′ +T ∥ +� +≤ +T +� +t=1 +� +4 +√ +2LM +nTγt ++ 2 +�2ǫt +γt +� +. +We first need to show the following preliminary result which is about the expansion property +of M-SPP update when performed over identical or different minibatches. +Lemma 10. Suppose that Assumptions 1 holds and the loss function ℓ is bounded in the interval +[0, M]. From w0 = w′ +0, let us define the sequences {wt}t∈[T] and {w′ +t}t∈[T] that are respectively +generated over {St}t∈[T] and {S′ +t}t∈[T] according to +Ft(wt) ≤ min +w∈W +� +Ft(w) := RSt(w) + γt +2 ∥w − wt−1∥2� ++ ǫt, +F ′ +t(w′ +t) ≤ min +w∈W +� +F ′ +t(w) := RS′ +t(w) + γt +2 ∥w − w′ +t−1∥2� ++ ǫt. +Assume that either St = S′ +t or St .= S′ +t for all t ∈ [T]. Let βt = 1{St̸=S′ +t}. Then the following bound +holds for all t ∈ [T], +∥wt − w′ +t∥ ≤ +t +� +τ=1 +� +βτ +4 +√ +2LM +nγτ ++ 2 +�2ǫτ +γτ +� +. +43 + +Proof. Let w∗ +t = arg minw Ft(w) and w′∗ +t = arg minw F ′ +t(w). It follows from Lemma 1 that +RSt(w∗ +t ) − RSt(w′∗ +t ) ≤γt +2 +� +∥w′∗ +t − wt−1∥2 − ∥w′∗ +t − w∗ +t ∥2 − ∥w∗ +t − wt−1∥2� +RS′ +t(w′∗ +t ) − RS′ +t(w∗ +t ) ≤γt +2 +� +∥w∗ +t − w′ +t−1∥2 − ∥w′∗ +t − w∗ +t ∥2 − ∥w′∗ +t − w′ +t−1∥2� +. +Summing both sides of the above two inequalities yields +RSt(w∗ +t ) − RSt(w′∗ +t ) + RS′ +t(w′∗ +t ) − RS′ +t(w∗ +t ) +≤γt +2 +� +∥w′∗ +t − wt−1∥2 − ∥w∗ +t − wt−1∥2 + ∥w∗ +t − w′ +t−1∥2 − ∥w′∗ +t − w′ +t−1∥2 − 2∥w′∗ +t − w∗ +t ∥2� +=γt +2 +� +2⟨w∗ +t − w′∗ +t , wt−1 − w′ +t−1⟩ − 2∥w′∗ +t − w∗ +t ∥2� +≤γt +2 +� +∥wt−1 − w′ +t−1∥2 − ∥w∗ +t − w′∗ +t ∥2� +. +We need to distinguish the following two complementary cases. +Case I: St = S′ +t. In this case, the previous inequality immediately leads to +∥w∗ +t − w′∗ +t ∥ ≤ ∥wt−1 − w′ +t−1∥. +By using triangle inequality and Lemma 6 we obtain +∥wt − w′ +t∥ ≤ ∥wt − w∗ +t ∥ + ∥w∗ +t − w′∗ +t ∥ + ∥w′ +t − w′∗ +t ∥ ≤ ∥wt−1 − w′ +t−1∥ + 2 +�2ǫt +γt +. +(17) +Case II: St and S′ +t differ in a single element. In this case, we have +∥w∗ +t − w′∗ +t ∥2 +≤∥wt−1 − w′ +t−1∥2 + 2 +γt +� +RSt(w′∗ +t ) − RSt(w∗ +t ) + RS′ +t(w∗ +t ) − RS′ +t(w′∗ +t ) +� +=∥wt−1 − w′ +t−1∥2 + 2 +γt +� +Rℓ +St(w′∗ +t ) − Rℓ +St(w∗ +t ) + Rℓ +S′ +t(w∗ +t ) − Rℓ +S′ +t(w′∗ +t ) +� +=∥wt−1 − w′ +t−1∥2 + 2 +γt + + 1 +|St| +� +z∈St +(ℓ(w′∗ +t ; z) − ℓ(w∗ +t ; z) + +1 +|S′ +t| +� +z∈S′ +t +(ℓ(w∗ +t ; z) − ℓ(w′∗ +t ; z)) + + +≤∥wt−1 − w′ +t−1∥2 + 4 +√ +2LM +nγt +∥w∗ +t − w′∗ +t ∥. +where in the last inequality we have used ℓ(·; ·) is +√ +2LM-Lipschitz with respect to its first argument +which is implied by Lemma 2, and St and S′ +t differ in a single element as well. Since x2 ≤ y2 + ax +implies x ≤ y + a for all x, y, a > 0, we can derive from the above that +∥w∗ +t − w′∗ +t ∥ ≤ ∥wt−1 − w′ +t−1∥ + 4 +√ +2LM +nγt +. +Then based on triangle inequality and Lemma 6 we have +∥wt − w′ +t∥ ≤ ∥wt − w∗ +t ∥ + ∥w∗ +t − w′∗ +t ∥ + ∥w′ +t − w′∗ +t ∥ ≤ ∥wt−1 − w′ +t−1∥ + 4 +√ +2LM +nγt ++ 2 +�2ǫt +γt +. +(18) +44 + +Let βt = 1{St̸=S′ +t} where 1{C} is the indicator function of the condition C. 
Based on the recursion +forms (17) and (18) and the condition w0 = w′ +0 we can show that for all t ∈ [T] +∥wt − w′ +t∥ ≤ +t +� +τ=1 +� +4βτ +√ +2LM +nγτ ++ 2 +�2ǫτ +γτ +� +, +which is the desired bound. +Now we are in the position to prove the main result in Proposition 1. +Proof of Proposition 1. Consider a fixed pair of minibatch sets S .= S′. +Part (a): Let {wt}t∈[T] and {w′ +t}t∈[T] be two solution sequences that are respectively generated +over {St}t∈[T] and {S′ +t}t∈[T] by Algorithm 1. +At each time instance t, define random variable +βt := 1{St̸=S′ +t}. Since by assumption S and S′ differ only in a single minibatch, there must exist +one and only one t ∈ [T] such that βt = 1 and βj = 0 for all j ∈ [T], j ̸= t. Then in the worst case +of βτ = 1 for τ = arg mini∈[t] γi, it follows from Lemma 10 that for all t ∈ [T], +∥wt − w′ +t∥ ≤ +4 +√ +2LM +n mini∈[t] γi ++ +t +� +i=1 +2 +�2ǫi +γi +≤ +4 +√ +2LM +n mini∈[T] γi ++ +T +� +i=1 +2 +�2ǫi +γi +. +Then the convex combination nature of ¯wT and ¯w′ +T implies that +∥ ¯wT − ¯w′ +T ∥ ≤ +� +t γt∥wt − w′ +t∥ +� +t γt +≤ +4 +√ +2LM +n mint∈[T] γt ++ +T +� +t=1 +2 +� +2ǫt +γt +. +The desired result follows immediately as the above bound holds for any pair {S, S′}. +Part (b): Recall that {ξt}t∈[T] are the uniform random indices for iteratively selecting data +minibatches from S and S′. +Let {wt}t∈[T] and {w′ +t}t∈[T] be two solution sequences that are +respectively generated over {Sξt}t∈[T] and {S′ +ξt}t∈[T] by Algorithm 3. +Define random variable +βt := 1� +Sξt̸=S′ +ξt +�. Since by assumption S and S′ differ only in a single minibatch, under without- +replacement sampling scheme, there must exist one and only one t ∈ [T] such that βt = 1 and +βj = 0 for all j ∈ [T], j ̸= t. Let us define the event Et := {βt = 1 and βj̸=t,j∈[T] = 0} for all t ∈ [T]. +Then the uniform randomness of ξt implies that +R (Et) = 1 +T , +t ∈ [T]. +Given t ∈ [T], suppose that Eτ occurs for some τ ∈ [t]. Then it follows from Lemma 10 that +∥wt − w′ +t∥ ≤ 4 +√ +2LM +nγτ ++ +t +� +i=1 +2 +�2ǫi +γi +. +45 + +Suppose that Eτ occurs for some τ ∈ {t + 1, t + 2, ..., T}, again it follows from Lemma 10 that +∥wt − w′ +t∥ ≤ +t +� +i=1 +2 +�2ǫi +γi +. +Then we have +Eξ[t] +� +∥wt − w′ +t∥ +� += +T +� +τ=1 +R (Eτ) +� +∥wt − w′ +t∥ | Eτ +� +≤ +t +� +τ=1 +� +4 +√ +2LM +nTγt ++ +t +� +i=1 +2 +T +� +2ǫi +γi +� ++ +T +� +τ=t+1 +� t +� +i=1 +2 +T +� +2ǫt +γt +� += +t +� +τ=1 +� +4 +√ +2LM +nTγτ ++ 2 +� +2ǫτ +γτ +� +≤ +T +� +t=1 +� +4 +√ +2LM +nTγt ++ 2 +� +2ǫt +γt +� +. +It follows that +Eξ[T ] +� +∥ ¯wT − ¯w′ +T ∥ +� +≤ +� +t γtEξ[t] [∥wt − w′ +t∥] +� +t γt +≤ +T +� +t=1 +� +4 +√ +2LM +nTγt ++ 2 +� +2ǫt +γt +� +. +The desired result follows immediately as the above bound holds for any pair {S, S′}. +C.2 +Proof of Theorem 6 +In this subsection, we prove Theorem 6 that is restated below. +Theorem 6. Suppose that Assumptions 1, 2, 3 hold and the loss function ℓ is bounded in the +interval (0, M]. Let ρ ∈ (0, 1/4] be an arbitrary scalar and set γt = λρt +4 . Suppose that n ≥ 76L +λρ . +Assume that ǫt ≤ min +� +ǫ +nt4 , +LM +λρn2T 2t +� +for some ǫ ∈ [0, 1]. Then with probability at least 1 − δ over +S, the weighted average output ¯wT of M-SPP-SWoR (Algorithm 3) satisfies +Eξ[T ] [D( ¯wT , W ∗)] +≲ +� +LM log(1/δ) log(T) +λρ +√ +nT ++ +� +ρ [R(w0) − R∗] +λT 2 ++ +L +λ2ρnT R∗ + +√ǫ +λT 2 +� L +λρ + G +� 1 +λρ +� +. +To show this result, we need to use the following restated McDiarmid’s inequality (McDiarmid, +1989) which is also known as bounded difference inequality. 
+Lemma 11 (McDiarmid’s/Bounded differences inequality). Let X1, X2, ..., XN be independent ran- +dom variables valued in X. Suppose that the function h : X N �→ R satisfies the bounded differences +property, i.e., the following inequality holds for any i ∈ [N] and any x1, ..., xN, x′ +i: +|h(x1, ..., xi−1, xi, xi+1, ..., xN) − h(x1, ..., xi−1, x′ +i, xi+1, ..., xN)| ≤ ci. +Then for any ε > 0, +P (h(X1, ..., XN) − E [h(X1, ..., XN)] ≥ ε) ≤ exp +� +− +2ε2 +�N +i=1 c2 +i +� +. +46 + +Now we are ready to prove Theorem 6. +Proof of Theorem 6. Let S = {St}t∈[T] and S′ = {S′ +t}t∈[T] be two sets of data minibatches such +that S .= S′. Then according to Proposition 1 the weighted average output ¯wT and ¯w′ +T respectively +generated by Algorithm 3 over S and S′ satisfy +sup +S,S′ Eξ[T ] +� +∥ ¯wT − ¯w′ +T ∥ +� +≤ +T +� +t=1 +� +4 +√ +2LM +nTγt ++ 2 +�2ǫt +γt +� +≤ +T +� +t=1 +� +5 +√ +2LM +nTγt +� +≤ 20 +√ +2LM(1 + log(T)) +λρnT +, +where in the last but one inequality we have used the condition ǫt ≤ +LM +4n2T 2γt = +LM +λρN2t. It follows +from the triangle inequality and the above bound that +sup +S,S′ Eξ[T ] +���D( ¯wT , W ∗) − D( ¯w′ +T , W ∗) +��� +≤ sup +S,S′ Eξ[T ] +� +∥ ¯wT − ¯w′ +T ∥ +� +≤ 20 +√ +2LM(1 + log(T)) +λρnT +. +Since ξ[T] are independent on S, as a direct consequence of applying McDiarmid’s inequality with +ci ≡ c = 20 +√ +2LM(1+log(T)) +λρnT +to h(S) := D( ¯wT , W ∗), we can show that with probability at least 1 − δ +over the randomness of S, +Eξ[T ] +� +D( ¯wT , W ∗) − ES +� +Eξ[T ] [D( ¯wT , W ∗)] +�� +≤ c +� +nT log(1/δ) +2 += 20 +� +LM log(1/δ)(1 + log(T)) +λρ +√ +nT +. +We next derive a bound for ES [D( ¯wT , W ∗)]. +In view of Jensen’s inequality and the quadratic +growth property of F we have +ES +� +Eξ[T ] [D( ¯wT , W ∗)] +� +=Eξ[T ] [ES [D( ¯wT , W ∗)]] +≤Eξ[T ] +�� +ES [D2( ¯wT , W ∗)] +� +≤Eξ[T ] +�� +2 +λES [R( ¯wT ) − R∗] +� +≲Eξ[T ] + + +� +ρ [R(w0) − R∗] +λT 2 ++ +L +λ2ρnT R∗ + +√ǫ +λT 2 +� L +λρ + G +� +1 +λρ +� + += +� +ρ [R(w0) − R∗] +λT 2 ++ +L +λ2ρnT R∗ + +√ǫ +λT 2 +� L +λρ + G +� +1 +λρ +� +, +where in the last inequality we have invoked Theorem 4. Therefore, based on the previous two +inequalities we obtain that with probability at least 1 − δ over S, +Eξ[T ] [D( ¯wT , W ∗)] +≲ +� +LM log(1/δ) log(T) +λρ +√ +nT ++ +� +ρ [R(w0) − R∗] +λT 2 ++ +L +λ2ρnT R∗ + +√ǫ +λT 2 +� L +λρ + G +� +1 +λρ +� +, +which gives the desired bound. +47 + +C.3 +Proof of Theorem 7 +Here we prove the following restated Theorem 7. +Theorem 7. Suppose that Assumptions 1 and 3 hold and the loss function ℓ is bounded in the +interval [0, M]. Set γt ≡ +� +T +n . Assume that ǫt ≤ +LM +4nT 2√ +nT . Then with probability at least 1 − δ over +S, the average output ¯wT = 1 +T +�T +t=1 wt of M-SPP (Algorithm 1) satisfies +|R( ¯wT ) − RS( ¯wT )| ≲ (LM + G +√ +LM) log(N) log(1/δ) +√ +nT ++ M +� +log (1/δ) +nT +. +We need the following lemma essentially from Bousquet et al. (2020, Corollary 8) that gives a +near-tight generalization bound for a learning algorithm that is uniformly stable with respect to +loss function. +Lemma 12 (Bousquet et al. (2020)). Suppose that a learning algorithm Aw, parameterized by w, +satisfies |ℓ(AwS(x), y) − ℓ(AwS′(x), y)| ≤ ̺ for any (x, y) ∈ X × Y and S .= S′. Assume the loss +function satisfies 0 ≤ ℓ(y′, y) ≤ M for all y, y′. Then for any δ ∈ (0, 1), with probability at least +1 − δ over an i.i.d. data set S of size N, +|R(AwS) − RS(AwS)| ≲ ̺ log(N) log +�1 +δ +� ++ M +� +log (1/δ) +N +. 
+With this lemma in place, we can prove the main result in Theorem 7 +Proof of Theorem 7. Let S = {St}t∈[T] and S′ = {S′ +t}t∈[T] be two sets of data minibatches satisfying +S .= S′. Note that γt ≡ γ = +� +T +n . Then according to Proposition 1 the average output ¯wT and ¯w′ +T +respectively generated by Algorithm 1 over S and S′ satisfy +sup +S,S′ ∥ ¯wT − ¯w′ +T ∥ ≤ 4 +√ +2LM +nγ ++ +T +� +t=1 +2 +�2ǫt +γ ≤ 5 +√ +2LM +nγ += 5 +√ +2LM +√ +nT +. +where in the last but one inequality we have used the condition ǫt ≤ +LM +4nT 2√ +N . It follows that +|ℓ( ¯wT ; z) − ℓ( ¯w′ +T ; z)| ≤ +√ +2ML∥ ¯wT − ¯w′ +T ∥ ≤ 10LM +√ +nT +, +where we have used ℓ(·; ·) is +√ +2LM-Lipschitz with respect to its first argument (which is implied +by Lemma 2). In view of Assumption 3 we have +|r( ¯wT ) − r( ¯w′ +T )| ≤ G∥ ¯wT − ¯w′ +T ∥ ≤ 5G +√ +2LM +√ +nT +. +This preceding two inequalities indicate that M-SPP is 10LM+5G +√ +2LM +√ +nT +-uniformly stable with respect +to the composite loss function ℓ + r. By invoking Lemma 12 to M-SPP we obtain that +|R(wS) − RS(wS)| ≲ (LM + G +√ +LM) log(nT) +√ +nT +log +�1 +δ +� ++ M +� +log (1/δ) +nT +. +The proof is concluded. +48 + +References +Alekh Agarwal, Peter L. Bartlett, Pradeep Ravikumar, and Martin J. Wainwright. Information- +theoretic lower bounds on the oracle complexity of stochastic convex optimization. IEEE Trans. +Inf. Theory, 58(5):3235–3249, 2012. 3, 6 +Zeyuan Allen-Zhu. Katyusha: The first direct acceleration of stochastic gradient methods. J. Mach. +Learn. Res., 18:221:1–221:51, 2017. 7 +Mihai Anitescu. Degenerate nonlinear programming with a quadratic growth condition. SIAM J. +Optim., 10(4):1116–1135, 2000. 9 +Hilal Asi and John C Duchi. The importance of better models in stochastic optimization. Proceed- +ings of the National Academy of Sciences, 116(46):22924–22930, 2019a. 2, 6, 13, 35 +Hilal Asi and John C. Duchi. Stochastic (approximate) proximal point methods: Convergence, +optimality, and adaptivity. SIAM J. Optim., 29(3):2257–2290, 2019b. 2, 3, 6, 13, 18, 20, 35 +Hilal Asi, Karan N. Chadha, Gary Cheng, and John C. Duchi. Minibatch stochastic approximate +proximal point methods. +In Advances in Neural Information Processing Systems (NeurIPS), +virtual, 2020. 2, 3, 5, 7, 18, 20, 24 +Francis R. Bach and Eric Moulines. Non-asymptotic analysis of stochastic approximation algorithms +for machine learning. In Advances in Neural Information Processing Systems (NIPS), pages 451– +459, Granada, Spain, 2011. 6 +Peter L Bartlett, Olivier Bousquet, and Shahar Mendelson. Local rademacher complexities. The +Annals of Statistics, 33(4):1497–1537, 2005. 7 +Raef Bassily, Vitaly Feldman, Crist´obal Guzm´an, and Kunal Talwar. Stability of stochastic gradient +descent on nonsmooth convex losses. In Advances in Neural Information Processing Systems +(NeurIPS), virtual, 2020. 8 +Dimitri P. Bertsekas. Incremental proximal methods for large scale convex optimization. Math. +Program., 129(2):163–195, 2011. 3, 6, 18 +L´eon Bottou and Olivier Bousquet. The tradeoffs of large scale learning. In Advances in Neural +Information Processing Systems (NIPS), pages 161–168, Vancouver, Canada, 2007. 7 +L´eon Bottou, Frank E. Curtis, and Jorge Nocedal. Optimization methods for large-scale machine +learning. SIAM Rev., 60(2):223–311, 2018. 6 +Olivier Bousquet and Andr´e Elisseeff. Stability and generalization. J. Mach. Learn. Res., 2:499–526, +2002. 4, 7, 16 +49 + +Olivier Bousquet, Yegor Klochkov, and Nikita Zhivotovskiy. Sharper bounds for uniformly stable +algorithms. 
In Proceedings of the Conference on Learning Theory (COLT), pages 610–626, Virtual +Event [Graz, Austria], 2020. 7, 48 +Karan N. Chadha, Gary Cheng, and John C. Duchi. +Accelerated, optimal and parallel: Some +results on model-based stochastic optimization. In Proceedings of the International Conference +on Machine Learning (ICML), pages 2811–2827, Baltimore, MD, 2022. 25 +Ronan Collobert, Samy Bengio, and Yoshua Bengio. A parallel mixture of svms for very large +scale problems. In Advances in Neural Information Processing Systems (NIPS), pages 633–640, +Vancouver, Canada], 2001. 23 +Koby Crammer, Ofer Dekel, Joseph Keshet, Shai Shalev-Shwartz, and Yoram Singer. +Online +passive-aggressive algorithms. J. Mach. Learn. Res., 7:551–585, 2006. 6 +Damek Davis and Dmitriy Drusvyatskiy. Stochastic model-based minimization of weakly convex +functions. SIAM J. Optim., 29(1):207–239, 2019. 2, 3, 4, 5, 6, 11, 18, 20 +Aaron Defazio, Francis R. Bach, and Simon Lacoste-Julien. +SAGA: A fast incremental gradi- +ent method with support for non-strongly convex composite objectives. In Advances in Neural +Information Processing Systems (NIPS), pages 1646–1654, Montreal, Canada, 2014. 7 +Qi Deng and Wenzhi Gao. Minibatch and momentum model-based methods for stochastic weakly +convex optimization. In Advances in Neural Information Processing Systems (NeurIPS), pages +23115–23127, virtual, 2021. 7, 8, 25 +Aymeric Dieuleveut, Nicolas Flammarion, and Francis R. Bach. Harder, better, faster, stronger +convergence rates for least-squares regression. J. Mach. Learn. Res., 18:101:1–101:51, 2017. 4, +18, 19, 20 +Dmitriy Drusvyatskiy and Adrian S. Lewis. Error bounds, quadratic growth, and linear convergence +of proximal methods. Math. Oper. Res., 43(3):919–948, 2018. 4 +John C. Duchi and Feng Ruan. Stochastic methods for composite and weakly convex optimization +problems. SIAM J. Optim., 28(4):3229–3259, 2018. 18 +John C. Duchi, Shai Shalev-Shwartz, Yoram Singer, and Ambuj Tewari. Composite objective mirror +descent. In Proceedings of the 23rd Conference on Learning Theory (COLT), pages 14–26, Haifa, +Israel, 2010. 6 +Vitaly Feldman and Jan Vondr´ak. +Generalization bounds for uniformly stable algorithms. +In +Advances in Neural Information Processing Systems (NeurIPS), pages 9770–9780, Montr´eal, +Canada, 2018. 4, 7 +50 + +Vitaly Feldman and Jan Vondr´ak. High probability generalization bounds for uniformly stable algo- +rithms with nearly optimal rate. In Proceedings of the Conference on Learning Theory (COLT), +pages 1270–1279, Phoenix, AZ, 2019. 7, 16 +Roy Frostig, Rong Ge, Sham M. Kakade, and Aaron Sidford. Competing with the empirical risk +minimizer in a single pass. In Proceedings of The 28th Conference on Learning Theory (COLT), +pages 728–763, Paris, France, 2015. 7 +Saeed Ghadimi and Guanghui Lan. +Optimal stochastic approximation algorithms for strongly +convex stochastic composite optimization I: A generic algorithmic framework. SIAM J. Optim., +22(4):1469–1492, 2012. 6 +Isabelle Guyon, Steve R. Gunn, Asa Ben-Hur, and Gideon Dror. Result analysis of the NIPS 2003 +feature selection challenge. In Advances in Neural Information Processing Systems (NIPS), pages +545–552, Vancouver, Canada], 2004. 23 +Moritz Hardt, Ben Recht, and Yoram Singer. Train faster, generalize better: Stability of stochastic +gradient descent. +In Proceedings of the 33nd International Conference on Machine Learning +(ICML), pages 1225–1234, New York City, NY, 2016. 7, 16 +Chonghai Hu, James T. Kwok, and Weike Pan. 
Accelerated gradient methods for stochastic op- +timization and online learning. In Advances in Neural Information Processing Systems (NIPS), +pages 781–789, Vancouver, Canada, 2009. 6 +Martin Jaggi, Virginia Smith, Martin Tak´ac, Jonathan Terhorst, Sanjay Krishnan, Thomas Hof- +mann, and Michael I. Jordan. Communication-efficient distributed dual coordinate ascent. In Ad- +vances in Neural Information Processing Systems (NIPS), pages 3068–3076, Montreal, Canada, +2014. 7 +Rie Johnson and Tong Zhang. Accelerating stochastic gradient descent using predictive variance +reduction. In Advances in Neural Information Processing Systems (NIPS), pages 315–323, Lake +Tahoe, NV, 2013. 3, 7 +Ellango Jothimurugesan, Ashraf Tahmasbi, Phillip B. Gibbons, and Srikanta Tirthapura. Variance- +reduced stochastic gradient descent on streaming data. +In Advances in Neural Information +Processing Systems (NeurIPS), pages 9928–9937, Montr´eal, Canada, 2018. 7 +Hamed Karimi, Julie Nutini, and Mark Schmidt. Linear convergence of gradient and proximal- +gradient methods under the polyak-�lojasiewicz condition. In Proceedings of the European Con- +ference on Machine Learning and Knowledge Discovery in Databases (ECML/PKDD), Part I, +pages 795–811, Riva del Garda, Italy, 2016. 4, 9 +Hiroyuki Kasai. SGDLibrary: A MATLAB library for stochastic optimization algorithms. J. Mach. +Learn. Res., 18:215:1–215:5, 2017. 23 +51 + +Yegor Klochkov and Nikita Zhivotovskiy. Stability and deviation optimal risk bounds with con- +vergence rate o(1/n). In Advances in Neural Information Processing Systems (NeurIPS), pages +5065–5076, virtual, 2021. 7 +Vladimir Koltchinskii. Local rademacher complexities and oracle inequalities in risk minimization. +The Annals of Statistics, 34(6):2593–2656, 2006. 7 +Brian Kulis and Peter L. Bartlett. Implicit online learning. In Proceedings of the 27th International +Conference on Machine Learning (ICML), pages 575–582, Haifa, Israel, 2010. 6 +Andrei Kulunchakov and Julien Mairal. A generic acceleration framework for stochastic composite +optimization. In Advances in Neural Information Processing Systems (NeurIPS), pages 12556– +12567, Vancouver, Canada, 2019. 6 +Guanghui Lan. An optimal method for stochastic composite optimization. Math. Program., 133 +(1-2):365–397, 2012. 6 +Jason D. Lee, Qihang Lin, Tengyu Ma, and Tianbao Yang. Distributed stochastic variance reduced +gradient methods by sampling extra data with replacement. J. Mach. Learn. Res., 18:122:1– +122:43, 2017. 7 +Erich L Lehmann and George Casella. Theory of point estimation. Springer Science & Business +Media, 2006. 7 +Yunwen Lei and Yiming Ying. Fine-grained analysis of stability and generalization for stochastic +gradient descent. +In Proceedings of the 37th International Conference on Machine Learning +(ICML), pages 5809–5819, Virtual Event, 2020. 7, 11, 13, 19, 20 +Mu Li, Tong Zhang, Yuqiang Chen, and Alexander J. Smola. Efficient mini-batch training for +stochastic optimization. In Proceedings of the 20th ACM SIGKDD International Conference on +Knowledge Discovery and Data Mining (KDD), pages 661–670, New York, NY, 2014. 7, 11, 24 +C. McDiarmid. Surveys in combinatorics, 1989: On the method of bounded differences. 1989. 46 +Song Mei, Yu Bai, and Andrea Montanari. The landscape of empirical risk for nonconvex losses. +The Annals of Statistics, 46(6A):2747–2774, 2018. 7 +Sayan Mukherjee, Partha Niyogi, Tomaso A. Poggio, and Ryan M. Rifkin. 
Learning theory: sta- +bility is sufficient for generalization and necessary and sufficient for consistency of empirical risk +minimization. Adv. Comput. Math., 25(1-3):161–193, 2006. 7 +Sahand N Negahban, Pradeep Ravikumar, Martin J Wainwright, Bin Yu, et al. A unified framework +for high-dimensional analysis of m-estimators with decomposable regularizers. Statistical Science, +27(4):538–557, 2012. 2 +52 + +Arkadi Nemirovski, Anatoli B. Juditsky, Guanghui Lan, and Alexander Shapiro. Robust stochastic +approximation approach to stochastic programming. SIAM J. Optim., 19(4):1574–1609, 2009. 6 +Arkaddii S Nemirovskii and Yu E Nesterov. Optimal methods of smooth convex minimization. +USSR Computational Mathematics and Mathematical Physics, 25(2):21–30, 1985. 12 +Andrei Patrascu and Ion Necoara. Nonasymptotic convergence of stochastic proximal point methods +for constrained convex optimization. J. Mach. Learn. Res., 18:198:1–198:42, 2017. 2, 3, 5, 6, 18, +20 +Alexander Rakhlin, Ohad Shamir, and Karthik Sridharan. Making gradient descent optimal for +strongly convex stochastic optimization. In Proceedings of the 29th International Conference on +Machine Learning (ICML), Edinburgh, Scotland, UK, 2012. 4, 6, 18, 20 +Pradeep Ravikumar, John Lafferty, Han Liu, and Larry Wasserman. +Sparse additive models. +Journal of the Royal Statistical Society: Series B (Statistical Methodology), 71(5):1009–1030, +2009. 2 +James Renegar and Benjamin Grimmer. A simple nearly optimal restart scheme for speeding up +first-order methods. Found. Comput. Math., 22(1):211–256, 2022. 12 +Herbert Robbins and Sutton Monro. A stochastic approximation method. The Annals of Mathe- +matical Statistics, pages 400–407, 1951. 6 +Herbert Robbins and David Siegmund. A convergence theorem for non negative almost super- +martingales and some applications. In Optimizing methods in statistics, pages 233–257. 1971. +35 +Mark Schmidt, Nicolas Le Roux, and Francis R. Bach. Convergence rates of inexact proximal- +gradient methods for convex optimization. In Advances in Neural Information Processing Systems +(NIPS), pages 1458–1466, Granada, Spain, 2011. 40 +Shai Shalev-Shwartz, Ohad Shamir, Nathan Srebro, and Karthik Sridharan. Learnability, stability +and uniform convergence. J. Mach. Learn. Res., 11:2635–2670, 2010. 7, 11 +Ohad Shamir, Nathan Srebro, and Tong Zhang. Communication-efficient distributed optimization +using an approximate newton-type method. In Proceedings of the 31th International Conference +on Machine Learning (ICML), pages 1000–1008, Beijing, China, 2014. 3, 7 +Nathan Srebro, Karthik Sridharan, and Ambuj Tewari. +Smoothness, low noise and fast rates. +In Advances in Neural Information Processing Systems (NIPS), pages 2199–2207, Vancouver, +Canada, 2010. 7, 8, 10, 11, 13, 19, 20, 27 +53 + +Panos Toulis and Edoardo M Airoldi. Asymptotic and finite-sample properties of estimators based +on stochastic gradients. The Annals of Statistics, 45(4):1694–1727, 2017. 5, 6 +Panos Toulis, Dustin Tran, and Edoardo M. Airoldi. Towards stability and optimality in stochastic +gradient descent. In Proceedings of the 19th International Conference on Artificial Intelligence +and Statistics (AISTATS), pages 1290–1298, Cadiz, Spain, 2016. 6, 18 +Alexandre B Tsybakov. Introduction to nonparametric estimation. Springer Science & Business +Media, 2008. 5, 18 +Sara A Van de Geer. High-dimensional generalized linear models and the lasso. The Annals of +Statistics, 36(2):614–645, 2008. 2 +Vladimir Vapnik. 
An overview of statistical learning theory. IEEE Trans. Neural Networks, 10(5): +988–999, 1999. 7 +Martin J. Wainwright. Sharp thresholds for high-dimensional and noisy sparsity recovery using +l1-constrained quadratic programming (lasso). IEEE Trans. Inf. Theory, 55(5):2183–2202, 2009. +21 +Jialei Wang, Mladen Kolar, Nathan Srebro, and Tong Zhang. Efficient distributed learning with +sparsity. +In Proceedings of the 34th International Conference on Machine Learning (ICML), +pages 3636–3645, Sydney, Australia, 2017a. 7, 24 +Jialei Wang, Weiran Wang, and Nathan Srebro. Memory and communication efficient distributed +stochastic optimization with minibatch prox. In Proceedings of the 30th Conference on Learning +Theory (COLT), pages 1882–1919, Amsterdam, The Netherlands, 2017b. 2, 3, 4, 5, 7, 8, 10, 11, +13, 14, 19, 20, 26 +Blake E. Woodworth and Nathan Srebro. An even more optimal stochastic optimization algorithm: +Minibatching and interpolation learning. In Advances in Neural Information Processing Systems +(NeurIPS), pages 7333–7345, virtual, 2021. 4, 19, 20 +Lin Xiao and Tong Zhang. A proximal stochastic gradient method with progressive variance re- +duction. SIAM J. Optim., 24(4):2057–2075, 2014. 3, 7, 14 +Yang You, Jing Li, Sashank J. Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiao- +dan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep +learning: Training BERT in 76 minutes. In Proceedings of the 8th International Conference on +Learning Representations (ICLR), Addis Ababa, Ethiopia, 2020. 11 +Xiao-Tong Yuan and Ping Li. On convergence of distributed approximate newton methods: Glob- +alization, sharper bounds and beyond. J. Mach. Learn. Res., 21:206:1–206:51, 2020. 3 +54 + +Xiao-Tong Yuan and Ping Li. Stability and risk bounds of iterative hard thresholding. IEEE Trans. +Inf. Theory, 68(10):6663–6681, 2022. 8 +Lijun Zhang, Tianbao Yang, and Rong Jin. +Empirical risk minimization for stochastic convex +optimization: o(1/n)-and o(1/n2)-type of risk bounds. In Conference on Learning Theory, pages +1954–1979, 2017. 7, 19, 20 +Tong Zhang. Leave-one-out bounds for kernel methods. Neural Comput., 15(6):1397–1437, 2003. 7 +Tong Zhang. Solving large scale linear prediction problems using stochastic gradient descent al- +gorithms. +In Proceedings of the Twenty-first International Conference on Machine Learning +(ICML), Banff, Alberta, Canada, 2004. 6 +Yuchen Zhang and Xiao Lin. DiSCO: Distributed optimization for self-concordant empirical loss. In +Proceedings of the 32nd International Conference on Machine Learning (ICML), pages 362–370, +Lille, France, 2015. 3, 7 +Kaiwen Zhou, Lai Tian, Anthony Man-Cho So, and James Cheng. Practical schemes for finding +near-stationary points of convex finite-sums. In Proceedings of the International Conference on +Artificial Intelligence and Statistics (AISTATS), pages 3684–3708, Virtual Event, 2022. 12 +Pan Zhou, Xiaotong Yuan, Huan Xu, Shuicheng Yan, and Jiashi Feng. Efficient meta learning +via minibatch proximal update. In Advances in Neural Information Processing Systems (2019), +pages 1532–1542, Vancouver, Canada, 2019. 
diff --git a/GtE1T4oBgHgl3EQfXQSk/content/tmp_files/load_file.txt b/GtE1T4oBgHgl3EQfXQSk/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..05fcf4becfffe62c348af57fcb9e5dbb3e26b5a3
--- /dev/null
+++ b/GtE1T4oBgHgl3EQfXQSk/content/tmp_files/load_file.txt
@@ -0,0 +1,1759 @@
filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf,len=1758
arXiv:2301.03125v1 [stat.ML] 9 Jan 2023

Sharper Analysis for Minibatch Stochastic Proximal Point Methods: Stability, Smoothness, and Deviation

Xiao-Tong Yuan and Ping Li
Cognitive Computing Lab, Baidu Research
No. 10 Xibeiwang East Road, Beijing 100193, China
10900 NE 8th St., Bellevue, Washington 98004, USA
E-mail: {xtyuan1980, pingli98}@gmail.com

Abstract

The stochastic proximal point (SPP) methods have gained recent attention for stochastic optimization, with strong convergence guarantees and superior robustness to the classic stochastic gradient descent (SGD) methods showcased at little to no added computational overhead. In this article, we study a minibatch variant of SPP, namely M-SPP, for solving convex composite risk minimization problems. The core contribution is a set of novel excess risk bounds of M-SPP derived through the lens of algorithmic stability theory. Particularly under smoothness and quadratic growth conditions, we show that M-SPP with minibatch size n and iteration count T enjoys an in-expectation fast rate of convergence consisting of an O(1/T²) bias decaying term and an O(1/(nT)) variance decaying term. In the small-n-large-T setting, this result substantially improves the best known results of SPP-type approaches by revealing the impact of the noise level of the model on the convergence rate. In the complementary small-T-large-n regime, we provide a two-phase extension of M-SPP to achieve comparable convergence rates. Moreover, we derive a near-tight high-probability (over the randomness of data) bound on the parameter estimation error of a sampling-without-replacement variant of M-SPP. Numerical evidence is provided to support our theoretical predictions when specialized to Lasso and logistic regression models.
1 Introduction

We consider the following problem of regularized risk minimization over a closed convex subset W ⊆ R^p:

    min_{w∈W} R(w) := R_ℓ(w) + r(w),  where R_ℓ(w) := E_{z∼D}[ℓ(w; z)],    (1)

where ℓ : W × Z → R_+ is a non-negative convex loss function whose value ℓ(w; z) measures the loss of a hypothesis, parameterized by w ∈ W, evaluated over a data sample z ∈ Z, D represents a distribution over Z, and r : W → R_+ is a data-independent non-negative convex function whose value r(w) measures certain complexity of the hypothesis. We are particularly interested in the situation where the composite population risk R is strongly convex around its minimizers, though in this setting the terms R_ℓ and r are not necessarily required to be so simultaneously. For instance, the ℓ1-norm regularizer r(w) = µ∥w∥_1 or its grouped variants are often used for learning sparse generalized linear models with quadratic or logistic loss functions (Van de Geer, 2008; Ravikumar et al., 2009; Negahban et al., 2012).

In statistical machine learning, it is usually assumed that the estimator only has access to, either as a batch training set or in an online/incremental manner, a collection S = {z_i}_{i=1}^N of i.i.d. random data instances drawn from D.
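To make (1) concrete, the short Python sketch below evaluates the composite empirical risk for the ℓ1-regularized quadratic-loss instance just mentioned; the synthetic data, the modulus mu and the function names are assumptions of this illustration rather than anything prescribed by the paper.

import numpy as np

def loss(w, x, y):
    # quadratic loss l(w; z) = 0.5 * (w^T x - y)^2 for a sample z = (x, y)
    return 0.5 * (w @ x - y) ** 2

def regularizer(w, mu):
    # l1-norm regularizer r(w) = mu * ||w||_1
    return mu * np.abs(w).sum()

def composite_empirical_risk(w, X, Y, mu=0.1):
    # empirical counterpart of (1): (1/|S|) * sum_i l(w; z_i) + r(w)
    return np.mean([loss(w, x, y) for x, y in zip(X, Y)]) + regularizer(w, mu)

# toy usage on synthetic sparse linear-regression data
rng = np.random.default_rng(0)
N, p = 100, 5
w_true = np.array([1.0, -2.0, 0.0, 0.0, 3.0])
X = rng.normal(size=(N, p))
Y = X @ w_true + 0.1 * rng.normal(size=N)
print(composite_empirical_risk(np.zeros(p), X, Y))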
The goal is to compute a stochastic estimator ŵ_S based on the knowledge of S, hopefully such that it generalizes well as a near minimizer of the population risk. More precisely, we aim at deriving a suitable law of large numbers, i.e., a sample-size-vanishing rate δ_N so that the excess risk at ŵ_S satisfies R(ŵ_S) − R* ≤ δ_N in expectation or with high probability over S, where R* := min_{w∈W} R(w) represents the minimal value of the composite risk.

In this work, inspired by the recent remarkable success of the stochastic proximal point (SPP) algorithms (Patrascu and Necoara, 2017; Asi and Duchi, 2019a,b; Davis and Drusvyatskiy, 2019) and their minibatch extensions (Wang et al., 2017b; Zhou et al., 2019; Asi et al., 2020), we provide a sharper generalization performance analysis for a class of minibatch SPP methods for solving the stochastic composite risk minimization problem (1).

1.1 Algorithm and Motivation of Study

Minibatch Stochastic Proximal Point Algorithm. Let S_t = {z_{i,t}}_{i=1}^n be a minibatch of n i.i.d. samples drawn from distribution D at time instance t ≥ 1 and denote R_{S_t}(w) := (1/n) Σ_{i=1}^n ℓ(w; z_{i,t}) + r(w) as the regularized empirical risk over S_t. We consider the Minibatch Stochastic Proximal Point (M-SPP) algorithm, as outlined in Algorithm 1, for composite risk minimization based on a sequence of data minibatches S = {S_t}_{t=1}^T. The precision value ǫ_t in the algorithm quantifies the sub-optimality of w_t for solving the inner-loop regularized ERM over the minibatch S_t.

Algorithm 1: Minibatch Stochastic Proximal Point (M-SPP)
Input: Regularization modulus {γ_t}_{t≥1}.
Output: w̄_T as a weighted average of {w_t}_{1≤t≤T}.
Initialization: Specify a value of w_0. Typically w_0 = 0.
for t = 1, 2, ..., T do
    Sample a minibatch S_t := {z_{i,t}}_{i=1}^n i.i.d. ∼ D^n and estimate w_t satisfying
        F_t(w_t) ≤ min_{w∈W} { F_t(w) := R_{S_t}(w) + (γ_t/2)∥w − w_{t−1}∥² } + ǫ_t,    (2)
    where R_{S_t}(w) := (1/n) Σ_{i=1}^n ℓ(w; z_{i,t}) + r(w) and ǫ_t ≥ 0 measures the sub-optimality of estimation.
end

The M-SPP algorithm is generic and it encompasses several existing SPP methods as special cases. For example, in the extreme case when n = 1 and ǫ_t ≡ 0, M-SPP reduces to a composite variant of the standard SPP method (Bertsekas, 2011), as formulated in (5). In general, the recursion update formulation (2) can be regarded as a natural composite extension of the existing minibatch stochastic proximal point methods for statistical estimation (Wang et al., 2017b; Asi et al., 2020).
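The following Python sketch mirrors the structure of Algorithm 1 on the ℓ1-regularized quadratic-loss instance used earlier; it is an illustration, not the authors' implementation. The inner subproblem (2) is solved only approximately by a few subgradient steps (so the returned iterate plays the role of w_t with some sub-optimality ǫ_t), and the schedule γ_t ∝ t, the inner step count and the step size are assumptions of the sketch.

import numpy as np

def subgrad_F(w, Xb, Yb, w_prev, gamma, mu):
    # subgradient of F_t(w) = (1/n) sum_i 0.5*(x_i^T w - y_i)^2 + mu*||w||_1 + (gamma/2)*||w - w_prev||^2
    return Xb.T @ (Xb @ w - Yb) / len(Yb) + mu * np.sign(w) + gamma * (w - w_prev)

def minibatch_spp(stream, p, gammas, inner_steps=50, mu=0.01, w0=None):
    # stream yields minibatches (X_t, Y_t); returns the weighted average bar{w}_T of Algorithm 1
    w_prev = np.zeros(p) if w0 is None else np.asarray(w0, dtype=float).copy()
    weighted_sum, weight_total = np.zeros(p), 0.0
    for t, (Xb, Yb) in enumerate(stream, start=1):
        gamma = gammas(t)
        lr = 1.0 / (gamma + 5.0)     # small enough for the smooth part of F_t on this toy data (assumption)
        w = w_prev.copy()
        for _ in range(inner_steps):  # crude approximate solve of the proximal subproblem (2)
            w -= lr * subgrad_F(w, Xb, Yb, w_prev, gamma, mu)
        w_prev = w
        weighted_sum += t * w         # weights proportional to t, matching bar{w}_T = 2/(T(T+1)) sum_t t*w_t
        weight_total += t
    return weighted_sum / weight_total

# toy usage: T minibatches of size n from a sparse linear model
rng = np.random.default_rng(1)
p, n, T = 5, 20, 50
w_true = np.array([1.0, -2.0, 0.0, 0.0, 3.0])
batches = [(X, X @ w_true + 0.1 * rng.normal(size=n)) for X in rng.normal(size=(T, n, p))]
print(np.round(minibatch_spp(batches, p, gammas=lambda t: 0.5 * t), 2))

Replacing the inner loop with any exact or more accurate solver only changes ǫ_t, which is precisely the quantity that the inexact analysis in Section 3 keeps track of.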
Prior results and limitations. The present study focuses on the generalization analysis of M-SPP for convex composite risk optimization. Recently, it has been shown by Asi et al. (2020, Theorem 2) that if the instantaneous loss functions are strongly convex with respect to the parameters, then the M-SPP algorithm converges at the rate of O(log(nT)/(nT)). Prior to that, Wang et al. (2017b, Theorem 5) proved an O(1/(nT)) rate for M-SPP when the individual loss functions are Lipschitz continuous and strongly convex. These results, among others for SPP (Patrascu and Necoara, 2017; Davis and Drusvyatskiy, 2019), commonly require that each instantaneous loss be strongly convex, which is too stringent to be fulfilled in high-dimensional or infinite-dimensional spaces. For instance, the quadratic loss ℓ(w; z) = (1/2)(w^⊤x − y)² over a feature-label pair z = (x, y) is convex but in general not strongly convex, although the population risk R_ℓ(w) = (1/2)E(y − w^⊤x)² is strongly convex provided that the covariance matrix of the random feature x is non-degenerate. Meanwhile, the Lipschitz-loss assumption made in the analysis of Wang et al. (2017b, Theorem 5) limits its applicability to smooth losses like the quadratic loss, not to mention the interaction between Lipschitz continuity and strong convexity (Agarwal et al., 2012; Asi and Duchi, 2019b).

The above-mentioned deficiencies of prior results motivate us to investigate the convergence behavior of M-SPP for composite risk minimization beyond the setting where each individual loss is strongly convex and Lipschitz continuous. From the perspective of optimization, smoothness is essential for establishing strong convergence guarantees for solving the inner-loop strongly convex risk minimization subproblems in (6), e.g., with variance-reduced stochastic algorithms (Johnson and Zhang, 2013; Xiao and Zhang, 2014) or communication-efficient distributed optimization algorithms (Shamir et al., 2014; Zhang and Lin, 2015; Yuan and Li, 2020). Aiming at covering such an important yet less understood problem regime, we focus our study on analyzing the convergence behavior of M-SPP when the convex loss functions are smooth and the risk function exhibits the quadratic growth property (see Assumption 2 for a formal definition).

1.2 Our Contributions and Main Results

The main contribution of the present work is a sharper non-asymptotic convergence analysis of the M-SPP algorithm through the lens of algorithmic stability theory (Bousquet and Elisseeff, 2002; Feldman and Vondrák, 2018).
Let W* := {w ∈ W : R(w) = R*} be the set of minimizers of the composite population risk R. We are particularly interested in the regime where the loss function ℓ is convex and smooth but not necessarily Lipschitz (e.g., the quadratic loss), while the population risk R satisfies the quadratic growth condition, i.e.,

    R(w) − R* ≥ (λ/2) min_{w*∈W*} ∥w − w*∥²,  ∀ w ∈ W,

for some λ > 0, which can be satisfied by strongly convex objectives and various other statistical estimation problems (see, e.g., Karimi et al., 2016; Drusvyatskiy and Lewis, 2018). For the family of L-smooth loss functions, with γ_t = O(λρt) for an arbitrary scalar ρ ∈ (0, 0.5] and ǫ_t ≡ 0, we show in Theorem 1 that the excess risk at the weighted average output w̄_T = 2/(T(T+1)) Σ_{t=1}^T t·w_t is in expectation upper bounded as

    R(w̄_T) − R* ≲ ρ[R(w_0) − R*]/T² + L·R*/(ρλnT).    (3)

In this composite bound, the first bias component, associated with the initial gap R(w_0) − R*, has a decaying rate of O(1/T²), and the second variance component, associated with R*, converges at the rate of O(1/(λnT)). The variance decaying rate actually matches the corresponding optimal rates of the SGD-type methods for strongly convex optimization (Rakhlin et al., 2012; Dieuleveut et al., 2017; Woodworth and Srebro, 2021). Also, such an O(1/T² + 1/(λnT)) bound matches those for SPP (Davis and Drusvyatskiy, 2019) or M-SPP (Wang et al., 2017b), which are in contrast obtained under the substantially stronger assumption that each individual loss function is strongly convex and Lipschitz as well. In the realizable or near-realizable machine learning regimes where R* equals or approximates zero, the variance term in (3) is sharper than the corresponding bounds of Wang et al. (2017b) and Davis and Drusvyatskiy (2019). To our best knowledge, the bound in (3) for smooth and convex loss functions is new to the SPP-type methods. More generally, for arbitrary convex risk functions, we present in Theorem 3 an O(1/√(nT)) excess risk bound for exact M-SPP. Further, as shown in Theorem 4 and Theorem 5, similar results can be extended to inexact M-SPP given that the inner-loop sub-optimality is sufficiently small.

In the regime T ≪ n, which is of special interest for off-line incremental learning with large data batches, setting the near-optimal value ρ = √(T/(nλ)) in the excess risk bound (3) yields an O(1/(T√(λnT))) rate of convergence. This rate, in terms of n, is substantially slower than the O(1/(λnT)) rate available in the previous small-n-large-T setup. In order to address such a deficiency, we propose a two-phase variant of M-SPP (see Algorithm 2) to boost its performance in the small-T-large-n regime: in the first phase, M-SPP with a sufficiently small minibatch size is invoked over S_1 to obtain w_1, and then, initialized by w_1, the second phase applies M-SPP to the remaining minibatches.
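Algorithm 2 itself is not reproduced in this excerpt, so the following sketch only mirrors the two-phase recipe described above: a warm-up M-SPP pass over S_1 re-chunked into smaller micro-batches, followed by a second M-SPP pass over the remaining minibatches initialized at w_1. It reuses the hypothetical minibatch_spp routine from the earlier sketch (with its optional w0 argument); the interface and the micro-batch splitting are assumptions of the illustration, not the authors' specification.

def two_phase_mspp(mspp, first_batch, rest_stream, p, micro_size, gammas):
    # Phase 1: run M-SPP over S_1, split into smaller micro-batches, to obtain w_1.
    X1, Y1 = first_batch
    micro = ((X1[i:i + micro_size], Y1[i:i + micro_size])
             for i in range(0, len(Y1), micro_size))
    w1 = mspp(micro, p, gammas)
    # Phase 2: run M-SPP over the remaining minibatches, warm-started at w_1.
    return mspp(rest_stream, p, gammas, w0=w1)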
Then, in Theorem 2, we show that the in-expectation excess risk at the output of the second phase can be accelerated to scale as

    R(w̄_T) − R* ≲ L²[R(w_0) − R*]/(λ²n²T²) + L·R*/(λnT),    (4)

which holds regardless of the relative size of the minibatch size n and the iteration count T.

In addition to the above in-expectation risk bounds, we further derive a high-probability model estimation error bound of M-SPP based on algorithmic stability theory. Our deviation analysis is carried out over a sampling-without-replacement variant of M-SPP (see Algorithm 3). For population risks with the quadratic growth property, up to an additive term in the inner-loop sub-optimality ǫ_t, we establish in Theorem 6 the following deviation bound on the estimation error D(w̄_T, W*), which holds with probability at least 1 − δ over S while in expectation over the randomness of sampling:

    D(w̄_T, W*) ≲ √L·log(1/δ)·log(T)/(λ√(nT)) + √( [R(w_0) − R*]/(λT²) + L·R*/(ρλ²nT) ).

When T = Ω(n), up to the logarithmic factors, the above bound matches (in terms of the total sample size N = nT) the known minimax lower bounds for statistical estimation even without computational limits (Tsybakov, 2008).
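Algorithm 3 is not shown in this excerpt; one natural reading, assumed for the sketch below, is that the T minibatches are disjoint chunks of a single fixed sample of size N = nT, i.e., the data are sampled without replacement across iterations. The helper simply shuffles and partitions a dataset so that it can be fed to an M-SPP routine such as the hypothetical minibatch_spp sketch above.

import numpy as np

def without_replacement_minibatches(X, Y, n, rng):
    # shuffle once, then split the N = n*T samples into T disjoint minibatches of size n
    perm = rng.permutation(len(Y))
    for start in range(0, len(Y) - n + 1, n):
        idx = perm[start:start + n]
        yield X[idx], Y[idx]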
To highlight the core contribution of this work, the following three new insights into M-SPP distinguish our results from the best known results of SPP-type methods for convex optimization:

1. First and foremost, the fast rates in (3) and (4) reveal the impact of the noise level, as quantified by R*, on the convergence rate, which has not been previously known for SPP-type methods. These bounds are valid for smooth losses and thus complement the previous ones for Lipschitz losses (Patrascu and Necoara, 2017; Wang et al., 2017b; Davis and Drusvyatskiy, 2019).

2. Second, the risk bounds in (3) and (4) are established under the quadratic growth condition on the population risk. This is substantially weaker than the instantaneous-loss-wise strong convexity assumption commonly imposed by prior analyses to achieve comparable rates for SPP-type methods (Toulis and Airoldi, 2017; Wang et al., 2017b; Asi et al., 2020).

3. Third, we provide a deviation analysis of M-SPP from the viewpoint of uniform algorithmic stability, which to our best knowledge has not yet been addressed in previous studies on SPP-type methods.

We should emphasize that, while we provide some insights into the numerical aspects of M-SPP through an empirical study, this work is largely a theoretical contribution.

1.3 Related Work

Our work is situated at the intersection of two lines of machine learning research, stochastic optimization and algorithmic stability theory, both of which have been actively studied with a vast body of beautiful and insightful theoretical results established in the literature. We next review, necessarily incompletely, some representative work that is closely relevant to ours.

Stochastic optimization. Stemming from the pioneering work of Robbins and Monro (1951), stochastic gradient descent (SGD) methods have been extensively studied to approximately solve a simplified version of problem (1) with r ≡ 0 (Zhang, 2004; Nemirovski et al., 2009; Rakhlin et al., 2012; Bottou et al., 2018).
For the composite formulation, a vast body of proximal SGD methods have been developed for efficient optimization in the presence of potentially non-smooth regularizers (Hu et al., 2009; Duchi et al., 2010; Ghadimi and Lan, 2012; Lan, 2012; Kulunchakov and Mairal, 2019). To handle the challenges associated with stepsize selection and the numerical instability of SGD (Nemirovski et al., 2009; Bach and Moulines, 2011), a number of more sophisticated methods, including implicit stochastic/online learning (Crammer et al., 2006; Kulis and Bartlett, 2010; Toulis et al., 2016; Toulis and Airoldi, 2017) and stochastic proximal point (SPP) methods (Bertsekas, 2011; Patrascu and Necoara, 2017; Asi and Duchi, 2019a,b; Davis and Drusvyatskiy, 2019), have recently been investigated for enhancing the stability and adaptivity of stochastic (composite) optimization. For example, in our considered composite optimization regime, the iteration procedure of vanilla SPP can be expressed as the following recursion for i ≥ 1:

    ŵ^spp_i := argmin_{w∈W} ℓ(w; z_i) + r(w) + (γ_i/2)∥w − ŵ^spp_{i−1}∥²,    (5)

where z_i ∼ D is a random data sample, γ_i is a regularization modulus, and ∥·∥ stands for the Euclidean norm. In contrast to standard SGD methods, which are simple in per-iteration modeling but brittle to the stepsize choice, the SPP methods are more accurate in objective approximation, which leads to substantially improved stability with respect to the choice of algorithm hyper-parameters while enjoying optimal guarantees on convergence (Asi and Duchi, 2019a,b).
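As a side note (not taken from the paper), the recursion (5) admits a closed form in the simplest unregularized least-squares case, where ℓ(w; z_i) = (1/2)(w^⊤x_i − y_i)², r ≡ 0 and W = R^p:

\hat{w}^{\mathrm{spp}}_i
  = \arg\min_{w}\ \tfrac{1}{2}\bigl(w^\top x_i - y_i\bigr)^2 + \tfrac{\gamma_i}{2}\,\|w - \hat{w}^{\mathrm{spp}}_{i-1}\|^2
  = \hat{w}^{\mathrm{spp}}_{i-1} + \frac{y_i - x_i^\top \hat{w}^{\mathrm{spp}}_{i-1}}{\gamma_i + \|x_i\|^2}\, x_i .

This is an SGD-like correction on the same sample but with the data-adaptive stepsize 1/(γ_i + ∥x_i∥²), which is one concrete way to see the improved robustness to the choice of modulus mentioned above.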
An attractive feature of the above (proximal) stochastic optimization methods is that their convergence guarantees directly apply to the population risk, and the minimax optimal rates of order O(1/T) are achievable after T rounds of iteration for strongly convex problems (Nemirovski et al., 2009; Agarwal et al., 2012; Rakhlin et al., 2012). For large-scale machine learning, the improved memory efficiency is another practical argument in favor of stochastic over batch optimization methods. However, due to their sequential processing nature, the stochastic optimization methods tend to be less efficient for parallelization, especially in distributed computing environments where excessive communication between nodes would be required for model updates (Bottou et al., 2018).

Empirical risk minimization. At the opposite end of SGD-type and online learning, the following (composite) empirical risk minimization (ERM, a.k.a. M-estimation) is another popularly studied formulation for statistical learning (Lehmann and Casella, 2006):

    ŵ^erm_S := argmin_{w∈W} { R_S(w) := (1/N) Σ_{i=1}^N ℓ(w; z_i) + r(w) }.

Thanks to the finite-sum structure, a large body of randomized incremental algorithms with linear rates of convergence have been established for ERM, including SVRG (Johnson and Zhang, 2013; Xiao and Zhang, 2014), SAGA (Defazio et al., 2014) and Katyusha (Allen-Zhu, 2017), to name a few. From the perspective of distributed computation, one intrinsic advantage of ERM over SGD-type methods lies in that it can better exploit the statistical correlation among data samples for designing communication-efficient distributed optimization algorithms (Jaggi et al., 2014; Shamir et al., 2014; Zhang and Lin, 2015; Lee et al., 2017). Unlike stochastic optimization methods, the generalization performance of the batch or incremental algorithms is by nature controlled by that of ERM (Bottou and Bousquet, 2007), which has long been studied with a wealth of insightful results available (Vapnik, 1999; Bartlett et al., 2005; Srebro et al., 2010; Mei et al., 2018). Particularly for strongly convex risk functions, the O(1/N) rate of convergence is possible for ERM (Bartlett et al., 2005; Koltchinskii, 2006; Zhang et al., 2017), though these fast rates are in general dimensionality-dependent for parametric learning models.

It has been recognized that SGD-type and ERM-type approaches cannot dominate each other in terms of generalization, runtime, storage and parallelization efficiency. This motivates a recent trend of proposing so-called stochastic model-based methods that can achieve the best of both worlds. Among others, a popular paradigm for such a combination is the minibatch proximal update, which in each iteration updates the model via (approximately) solving a local ERM over a stochastic minibatch (Li et al., 2014; Wang et al., 2017b; Asi et al., 2020; Deng and Gao, 2021). This strategy can be viewed as a minibatch extension of the SPP algorithm, and it has been shown to attain a substantially improved trade-off between computation, communication and memory efficiency for large-scale distributed machine learning (Li et al., 2014; Wang et al., 2017a). Alternatively, a number of online extensions of the incremental finite-sum algorithms, such as streaming SVRG (Frostig et al., 2015) and streaming SAGA (Jothimurugesan et al., 2018), have been proposed for stochastic optimization with competitive guarantees relative to ERM but at a lower cost of computation.

Algorithmic stability and generalization. Since the seminal work of Bousquet and Elisseeff (2002), algorithmic stability has been extensively studied, with remarkable success achieved in establishing generalization bounds for strongly convex ERM estimators (Zhang, 2003; Mukherjee et al., 2006; Shalev-Shwartz et al., 2010).
Particularly, the state-of-the-art risk bounds of strongly convex ERM are offered by approaches based on the notion of uniform stability (Feldman and Vondrák, 2018, 2019; Bousquet et al., 2020; Klochkov and Zhivotovskiy, 2021). It was shown by Hardt et al. (2016) that the solution obtained via (stochastic) gradient descent is stable for smooth convex or non-convex loss functions. For non-smooth convex losses, the stability-induced generalization bounds of SGD have been established in expectation (Lei and Ying, 2020) or deviation (Bassily et al., 2020). For learning with sparsity, algorithmic stability theory has been employed to derive the generalization bounds of the popularly used iterative hard thresholding (IHT) algorithm (Yuan and Li, 2022). Through the lens of uniform algorithmic stability, convergence rates of M-SPP have been studied for convex (Wang et al., 2017b) and weakly convex (Deng and Gao, 2021) Lipschitz losses. While sharing a similar spirit to Wang et al. (2017b) and Deng and Gao (2021), our analysis customized for smooth convex loss functions is considerably different, and the resultant fast rates are of special interest in low-noise statistical settings (Srebro et al., 2010).

1.4 Notation and Paper Organization

Notation. The key quantities and notations frequently used in our analysis are summarized in Table 1.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' The key quantities and notations frequently used in our analysis are summarized in Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Notation Definition n minibatch size T round of iteration N total number of samples visited, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=', N = nT f hypothesis ℓ loss function r regularization term Rℓ population risk: Rℓ(w) := E(x,y)∼D[ℓ(fw(x), y)] R composite population risk: R(w) := Rℓ(w) + r(w) R∗ the optimal value of composite risk, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=', R∗ := minw∈W R(w) W ∗ the optimal solution set of composite risk, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=', W ∗ := arg minw∈W R(w) St data minibatch at time instance t SI The union of data minibatch over I, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=', SI := {St}t∈I Rℓ S empirical risk over S, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=', Rℓ S(w) := 1 |S| � (x,y)∈S ℓ(fw(x, y) RS composite empirical risk over S, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=', RS(w) := Rℓ S(w) + r(w) ǫt precision of minibatch risk minimization at time instance t ∥w∥1 ℓ1-norm of a vector w, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=', ∥w∥1 := � i |[w]i| ∥w∥ Euclidean norm of a vector w D(w, W ∗) the distance from w to W ∗, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=', D(w, W ∗) = minw∗∈W ∗ ∥w − w∗∥ [T] [T] := {1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='..' 
Organization. The paper proceeds with the material organized as follows. In Section 2, we analyze the risk bounds of exact M-SPP with convex and smooth loss functions and present a two-phase variant to further improve convergence performance. In Section 3, we extend our analysis to the more realistic setting where inexact M-SPP iterations are allowed. In Section 4, we study the high-probability estimation error bounds of M-SPP. A comprehensive comparison to some closely related results is given in Section 5. The numerical study for theory verification and algorithm evaluation is provided in Section 6. Concluding remarks are made in Section 7. All proofs of the main results, and some additional results on the iteration stability of M-SPP, are relegated to the appendix.

2 A Sharper Analysis of M-SPP for Smooth Loss

In this section, we analyze the convergence rate of M-SPP for smooth and convex loss functions using the tools developed in algorithmic stability theory. In what follows, for the sake of notational simplicity and presentation clarity of the core ideas, we assume for the time being that the inner-loop composite ERM in the M-SPP iteration procedure (2) has been solved exactly with $\epsilon_t \equiv 0$, i.e.,
$$w_t = \arg\min_{w\in W} \Big\{ F_t(w) := R_{S_t}(w) + \frac{\gamma_t}{2}\|w - w_{t-1}\|^2 \Big\}. \qquad (6)$$
A full convergence analysis for the inexact variant (i.e., $\epsilon_t > 0$) will be presented in Section 3 via a slightly more involved perturbation analysis.
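To make the update (6) concrete, the following sketch, which is our own illustration and not part of the paper, implements one pass of exact M-SPP for ridge-regularized least squares, where each minibatch subproblem has a closed-form solution. The names `prox_erm_step` and `mspp_exact`, the ridge regularizer, and the synthetic data are all assumptions made for the example.

```python
import numpy as np

def prox_erm_step(X, y, w_prev, gamma, mu):
    """Exactly solve the minibatch subproblem of Eq. (6) for squared loss
    with ridge regularization r(w) = (mu/2)||w||^2:
        min_w 1/(2n)||Xw - y||^2 + (mu/2)||w||^2 + (gamma/2)||w - w_prev||^2.
    The objective is quadratic, so the minimizer solves a linear system."""
    n, p = X.shape
    A = X.T @ X / n + (mu + gamma) * np.eye(p)
    b = X.T @ y / n + gamma * w_prev
    return np.linalg.solve(A, b)

def mspp_exact(minibatches, gamma_schedule, mu, p):
    """Run exact M-SPP over (X_t, y_t) minibatches and return the weighted
    average w_bar = 2/(T(T+1)) * sum_t t * w_t used in Theorem 1."""
    w = np.zeros(p)
    T = len(minibatches)
    w_bar = np.zeros(p)
    for t, (X_t, y_t) in enumerate(minibatches, start=1):
        w = prox_erm_step(X_t, y_t, w, gamma_schedule(t), mu)
        w_bar += 2.0 * t / (T * (T + 1)) * w
    return w_bar

# Illustrative usage with synthetic data (n = 64, T = 50, p = 10).
rng = np.random.default_rng(0)
p, n, T = 10, 64, 50
w_true = rng.normal(size=p)
batches = []
for _ in range(T):
    X = rng.normal(size=(n, p))
    batches.append((X, X @ w_true + 0.01 * rng.normal(size=n)))
# gamma_t proportional to t, mimicking the schedule gamma_t = lambda*rho*t/4.
w_hat = mspp_exact(batches, gamma_schedule=lambda t: 0.05 * t, mu=1e-3, p=p)
print(np.linalg.norm(w_hat - w_true))
```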
2.1 Basic Assumptions

We begin by introducing some basic assumptions that will be used in the analysis to follow. We say a differentiable function $g : W \mapsto \mathbb{R}$ is $L$-smooth if for all $w, w' \in W$,
$$\big| g(w) - g(w') - \langle \nabla g(w), w - w' \rangle \big| \le \frac{L}{2}\|w - w'\|^2.$$
As formally stated in the following assumption, we suppose that the individual loss functions are convex and $L$-smooth, which is satisfied, e.g., by the quadratic loss (for regression) and the logistic loss (for prediction).

Assumption 1. The loss function $\ell$ is convex and $L$-smooth with respect to its first argument. Also, we assume that the regularization term $r$ is convex over $W$.

Let us define $D(w, W^*) := \min_{w^*\in W^*}\|w - w^*\|$ as the distance from $w$ to the set $W^*$ of minimizers. The next assumption requires that the population risk exhibits quadratic growth away from the set of minimizers (Anitescu, 2000; Karimi et al., 2016).

Assumption 2. The population risk function $R$ satisfies $R(w) \ge R^* + \frac{\lambda}{2} D^2(w, W^*)$ for all $w \in W$ and some $\lambda > 0$.

Clearly, the quadratic growth property is implied by the traditional strong convexity condition (around the minimizers), which is satisfied by a number of popular learning models including linear and logistic regression, generalized linear models, smoothed Huber losses, and various other statistical estimation problems. Particularly, Assumption 2 holds when $R^\ell$ is strongly convex and $r$ is convex.
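As a quick sanity check (our own one-line derivation, not a statement from the paper), if $R$ is $\lambda$-strongly convex with a minimizer $w^* \in W^*$ satisfying the first-order optimality condition $\langle \nabla R(w^*), w - w^* \rangle \ge 0$ for all $w \in W$, then quadratic growth with the same modulus follows directly:
$$R(w) \;\ge\; R(w^*) + \langle \nabla R(w^*),\, w - w^* \rangle + \tfrac{\lambda}{2}\|w - w^*\|^2 \;\ge\; R^* + \tfrac{\lambda}{2}\|w - w^*\|^2 \;\ge\; R^* + \tfrac{\lambda}{2} D^2(w, W^*).$$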
Notice that for risk functions with the quadratic growth property, the prior analysis of M-SPP for Lipschitz losses (Wang et al., 2017b) is not generally applicable, because Assumption 2 implies that the Lipschitz constant of the loss can be arbitrarily large when the distance $\min_{w^*\in W^*}\|w - w^*\|$ is allowed to tend to infinity.

2.2 Main Results

The following theorem is our main result on the in-expectation rate of convergence of exact M-SPP with smooth losses and quadratic-growth population risk functions. Recall that $N = nT$ is the total number of data points visited up to the iteration counter $T$.

Theorem 1. Suppose that Assumptions 1 and 2 hold. Consider $\epsilon_t \equiv 0$ and the weighted average output $\bar{w}_T = \frac{2}{T(T+1)}\sum_{t=1}^T t\, w_t$ in Algorithm 1. Let $\rho \in (0, 0.5]$ be an arbitrary scalar.

(a) Suppose that $n \ge \frac{64L}{\lambda\rho}$. Set $\gamma_t = \frac{\lambda\rho t}{4}$ for $t \ge 1$. Then for any $T \ge 1$,
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \le \frac{4\rho\left[R(w_0) - R^*\right]}{T^2} + \frac{2^9 L}{\lambda\rho nT} R^*.$$

(b) Set $\gamma_t = \frac{\lambda\rho t}{4} + \frac{16L}{n}$ for $t \ge 1$.
Then for any $T \ge 1$,
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \le \left(\frac{4\rho}{T^2} + \frac{2^8 L}{\lambda nT}\right)\left[R(w_0) - R^*\right] + \left(\frac{2^{16} L^2}{\lambda^2\rho^2 n^2 T} + \frac{2^9 L}{\lambda\rho nT}\right) R^*.$$

Proof. The proof technique is inspired by the uniform stability arguments developed by Wang et al. (2017b) for Lipschitz and instance-wise strongly convex losses, with several new elements developed along the way for handling smooth losses and the quadratic growth of the risk function. As a non-trivial ingredient, we show that it is possible to extend those stability arguments to smooth losses in view of a classical result from Srebro et al. (2010, Lemma 2.1) that allows the derivative of a smooth loss to be bounded in terms of its function value. See Appendix A.1 for a full proof of this result.
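The self-bounding property invoked above states that a non-negative $L$-smooth function $g$ satisfies $\|\nabla g(w)\|^2 \le 2L\, g(w)$ (cf. Srebro et al., 2010, Lemma 2.1). The snippet below is a small numerical check of this inequality for the logistic loss, a hypothetical illustration of ours rather than part of the paper's analysis; the logistic loss is $L$-smooth with $L = 1/4$.

```python
import numpy as np

# Logistic loss g(z) = log(1 + exp(-z)) is non-negative and L-smooth with L = 1/4.
# The self-bounding property then reads |g'(z)|^2 <= 2 * L * g(z), which is the
# ingredient letting loss derivatives be controlled by loss values.
L = 0.25
z = np.linspace(-10.0, 10.0, 2001)
loss = np.log1p(np.exp(-z))          # g(z)
grad = -1.0 / (1.0 + np.exp(z))      # g'(z) = -sigmoid(-z)
lhs = grad ** 2
rhs = 2.0 * L * loss
print("self-bounding holds on the grid:", bool(np.all(lhs <= rhs + 1e-12)))
```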
A few remarks on Theorem 1 are in order.

Remark 1. In Part (a), the minibatch size is required to be sufficiently large. In this setting, the excess risk bound consists of two components: the bias component associated with the initial gap $R(w_0) - R^*$ decays at the rate $O\left(\frac{1}{T^2}\right)$, while the variance component associated with $R^*$ vanishes at a dominant rate of $O\left(\frac{1}{\lambda nT}\right)$. The variance term shows that the convergence rate improves in low-noise settings where the factor $R^*$ is relatively small. In the extreme case of a separable problem with $R^* = 0$, the excess risk bound of Theorem 1 scales as fast as $O\left(\frac{1}{T^2}\right)$.

Remark 2. One disadvantage of the result in Part (a) lies in the requirement that the minibatch size be sufficiently larger than the condition number of the population risk $R$. In contrast, the excess risk bound in Part (b) holds for arbitrary minibatch sizes. The cost, however, is a relatively slower bias decay term $O\left(\frac{1}{T^2} + \frac{1}{\lambda nT}\right)$, which is dominated by $O\left(\frac{1}{\lambda nT}\right)$ in the case of $T \gg n$.

Remark 3. Let $N = nT$ be the total number of data points accessed. When $T \gg n$, the $O\left(\frac{1}{N}\right)$ dominant rates in Theorem 1 match the prior ones for SPP-type methods (Wang et al., 2017b; Davis and Drusvyatskiy, 2019), which are, however, obtained under the assumption that each individual loss function is Lipschitz continuous and strongly convex. In comparison to the $O\left(\frac{1}{N}\right)$ rate established for SGD with smooth losses (Lei and Ying, 2020, Theorem 12), our result in Theorem 1 is stronger and less stringent in the following senses: 1) our bound shows explicitly the impact of $R^*$, which usually represents the noise level of the model; and 2) we only require the population risk to have the quadratic growth property, while the bound of Lei and Ying (2020, Theorem 12) not only requires the loss to be Lipschitz but also assumes the empirical risk to be strongly convex.

Let us further look into the choice of the scalar $\rho$ in Theorem 1. We focus the discussion on Part (a); similar observations apply to Part (b).
We distinguish two complementary cases regarding the relative magnitudes of the minibatch size $n$ and the number of iteration rounds $T$.

Case I: Small-$n$-large-$T$. Suppose that $n = O(1)$ and $T \to \infty$ is allowed. In this case, simply setting $\rho = 0.5$ yields a convergence rate of order $O\left(\frac{1}{T^2} + \frac{1}{\lambda nT}\right)$ in Part (a).

Case II: Small-$T$-large-$n$. Suppose that $T = O(1)$ and $n \to \infty$ is allowed. In this setup, given that $n \ge \frac{4T}{\lambda}$, a roughly optimal choice $\rho = \sqrt{\frac{T}{n\lambda}}$ makes the excess risk bound in Theorem 1(a) of order $O\left(\frac{1}{T\sqrt{\lambda nT}}\right)$, which is substantially slower than the fast rate in Case I. This is intuitive because M-SPP with large minibatches behaves more like regularized ERM, which is known to exhibit a slow rate of convergence even for strongly convex problems (Shalev-Shwartz et al., 2010; Srebro et al., 2010). Nevertheless, such a small-$T$-large-$n$ setup is of special interest for off-line incremental learning with large minibatches and for distributed statistical learning (Li et al., 2014; Wang et al., 2017b; You et al., 2020). We will address this critical case in the next subsection.
2.3 A Two-Phase M-SPP Method

Algorithm 2: Two-Phase M-SPP (M-SPP-TP)
Input: Dataset $S = \{S_t\}_{t=1}^T$ in which $S_t := \{z_{i,t}\}_{i=1}^n \overset{\text{i.i.d.}}{\sim} \mathcal{D}^n$, regularization moduli $\{\gamma_t > 0\}_{t\in[T]}$.
Output: $\bar{w}_T$ as a weighted average of $\{w_t\}_{2\le t\le T}$.
Initialization: Specify a value of $w_0$. Typically $w_0 = 0$.
/* Phase-I */ Divide the sample $S_1$ into disjoint minibatches of equal size $m$; run M-SPP over these minibatches to obtain the output $w_1$.
/* Phase-II */ Initialized with $w_1$, run M-SPP over the data minibatches $\{S_t\}_{2\le t\le T}$ with $\{\gamma_t\}_{2\le t\le T}$ to obtain the sequence $\{w_t\}_{2\le t\le T}$.

To remedy the deficiencies mentioned in the previous discussion, we propose a two-phase variant of M-SPP, as outlined in Algorithm 2, to boost its performance in the small-$T$-large-$n$ regime. The procedure can be regarded as a sort of restarting argument (Nemirovskii and Nesterov, 1985; Renegar and Grimmer, 2022; Zhou et al., 2022) for M-SPP. More specifically, Phase-I serves as an initialization step that invokes M-SPP on a uniform division of $S_1$ with minibatch size $m$ to obtain $w_1$. Then, starting from $w_1$, Phase-II simply invokes M-SPP on the subsequent large minibatches $\{S_t\}_{t\ge 2}$, which is suitable for large-scale parallelization if applicable.
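A minimal rendering of Algorithm 2, again a sketch of ours under the squared-loss setting, could look as follows. It reuses the hypothetical `prox_erm_step` helper from the earlier sketch, takes the Phase-I output as a plain average of its iterates for simplicity, and indexes the regularization schedule locally within each phase; all of these are illustrative assumptions rather than the paper's exact prescription.

```python
import numpy as np

def run_mspp(batches, w0, gamma_schedule, mu, weights):
    """Generic M-SPP loop: one proximal step per minibatch plus weighted
    averaging. Relies on prox_erm_step from the earlier sketch."""
    w, w_bar = w0.copy(), np.zeros_like(w0)
    for t, (X_t, y_t) in enumerate(batches, start=1):
        w = prox_erm_step(X_t, y_t, w, gamma_schedule(t), mu)
        w_bar += weights[t - 1] * w
    return w, w_bar

def mspp_two_phase(batches, m, gamma_schedule, mu, p):
    """Algorithm 2 sketch: Phase-I runs M-SPP on an equal-size-m split of the
    first minibatch S_1; Phase-II restarts M-SPP from w_1 on S_2, ..., S_T and
    returns w_bar = 2/((T-1)(T+2)) * sum_{t=2}^T t * w_t (cf. Theorem 2)."""
    T = len(batches)
    X1, y1 = batches[0]
    # Phase-I: uniform division of S_1 into chunks of size m; the Phase-I
    # output is taken here as the plain average of the iterates.
    chunks = [(X1[i:i + m], y1[i:i + m]) for i in range(0, len(y1), m)]
    k = len(chunks)
    _, w1 = run_mspp(chunks, np.zeros(p), gamma_schedule, mu,
                     weights=np.full(k, 1.0 / k))
    # Phase-II: restart from w_1 on the remaining large minibatches, with the
    # weights 2t/((T-1)(T+2)) for t = 2, ..., T (they sum to one).
    phase2_weights = np.array([2.0 * t / ((T - 1) * (T + 2))
                               for t in range(2, T + 1)])
    _, w_bar = run_mspp(batches[1:], w1, gamma_schedule, mu, phase2_weights)
    return w_bar
```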
The following theorem is a consequence of Theorem 1 applied to this two-phase M-SPP procedure.

Theorem 2. Suppose that Assumptions 1 and 2 hold. Consider $\epsilon_t \equiv 0$ for implementing M-SPP in both Phase-I and Phase-II of Algorithm 2, and consider the weighted average output $\bar{w}_T = \frac{2}{(T-1)(T+2)}\sum_{t=2}^T t\, w_t$ in Phase-II.

(a) Suppose that $n \ge \frac{128L}{\lambda}$. Set $m = \frac{128L}{\lambda}$ in Phase-I and $\gamma_t = \frac{\lambda t}{8}$ for implementing M-SPP in both Phase-I and Phase-II. Then for any $T \ge 2$, $\bar{w}_T$ satisfies
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \lesssim \frac{L^2\left[R(w_0) - R^*\right]}{\lambda^2 n^2 T^2} + \frac{L}{\lambda nT} R^*.$$

(b) Set $m = O(1)$ in Phase-I and $\gamma_t = \frac{\lambda t}{8} + \frac{16L}{n}$ for implementing M-SPP in both Phase-I and Phase-II. Then for any $T \ge 2$, $\bar{w}_T$ satisfies
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \lesssim \frac{L^2\left[R(w_0) - R^*\right]}{\lambda^2 nT} + \frac{L^3}{\lambda^3 nT} R^*.$$

Proof. See Appendix A.2 for a proof of this result.

Remark 4. Part (a) of Theorem 2 suggests that when the minibatch size is sufficiently large, the excess risk bound of two-phase M-SPP has a bias decay term of scale $O\left(\frac{1}{n^2T^2}\right)$ and a variance term that decays at the rate of $O\left(\frac{1}{nT}\right)$.
The rate remains valid even when the scale of $T$ is relatively small, and is thus stronger than the $O\left(\frac{1}{T\sqrt{nT}}\right)$ rate implied by Theorem 1 for the vanilla M-SPP in the small-$T$-large-$n$ regime. It is worth mentioning that both the bias and variance components in our bound for M-SPP are faster than those derived for strongly convex ERM (Srebro et al., 2010).

Remark 5. The excess risk bound in Part (b) of Theorem 2 is valid for arbitrary minibatch sizes, but at the cost of a relatively slower $O\left(\frac{1}{nT}\right)$ bias decay rate.

2.4 Results for Arbitrary Convex Risks

We further analyze the proposed M-SPP algorithm when the loss function $\ell$ is convex and smooth, but without requiring that the composite risk $R$ have the quadratic growth property. The following is our main result in this generic setting.

Theorem 3. Suppose that Assumption 1 holds. Set $\gamma_t \equiv \gamma \ge \frac{16L}{n}$. Let $\bar{w}_T = \frac{1}{T}\sum_{t=1}^T w_t$ be the average output of Algorithm 1. Then
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \lesssim \frac{\gamma}{T} D^2(w_0, W^*) + \frac{L}{\gamma n} R^*.$$
In particular, for $\gamma = \sqrt{\frac{T}{n}} + \frac{16L}{n}$, it holds that
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \lesssim \left(\frac{1}{\sqrt{nT}} + \frac{L}{nT}\right) D^2(w_0, W^*) + \frac{L}{\sqrt{nT}} R^*.$$

Proof. See Appendix A.3 for a proof of this result.
Remark 6. The first bound of Theorem 3 implies that for any $\epsilon \in (0, 1)$, by setting $\gamma = O\left(\frac{L}{\epsilon n}\right)$, $R(\bar{w}_T)$ converges to $(1+\epsilon)R^*$ at the rate of $O\left(\frac{1}{nT\epsilon}\right)$. This bound matches the results of Lei and Ying (2020, Theorem 4) for the smooth SGD method. The second bound of Theorem 3 further shows that by setting $\gamma = O\left(\sqrt{\frac{T}{n}} + \frac{L}{n}\right)$, the excess risk of $\bar{w}_T$ decays at the rate of $O\left(\frac{1}{\sqrt{nT}}\right)$ in both the bias and variance terms, which matches in order the corresponding bound derived for Lipschitz losses (Wang et al., 2017b, Theorem 4). To our knowledge, such a bias-variance composite rate of convergence is new for SPP-type methods with convex and smooth loss functions.

Analogous to the robustness analysis of SPP (Asi and Duchi, 2019a,b), we have also analyzed the iteration stability of M-SPP for convex losses with respect to the choice of the regularization modulus $\gamma_t$. The corresponding results, which can be found in Appendix A.4, confirm that the choice of $\gamma_t$ needed to generate a non-divergent sequence of estimation errors is insensitive to the gradient scale of the loss functions.

3 Perturbation Analysis for Inexact M-SPP

In the preceding section, we analyzed the convergence rates of M-SPP under the assumption that the inner-loop proximal ERM subproblems constructed in its iteration procedure (2) are solved exactly, i.e., $\epsilon_t \equiv 0$. To make our analysis more practical, we further provide in this section a perturbation analysis of M-SPP when the inner-loop proximal ERM subproblems are only required to be solved approximately, up to a certain precision $\epsilon_t > 0$. As a starting point, we need to impose the following Lipschitz continuity assumption on the regularization term $r$.
Assumption 3. The regularization term $r$ is Lipschitz continuous over $W$, i.e., $|r(w) - r(w')| \le G\|w - w'\|$ for all $w, w' \in W$.

For example, the $\ell_1$-norm regularizer $r(w) = \mu\|w\|_1$ satisfies this assumption with respect to the Euclidean norm, since $|r(w) - r(w')| = \mu\big|\|w\|_1 - \|w'\|_1\big| \le \mu\|w - w'\|_1 \le \mu\sqrt{p}\,\|w - w'\|$.
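A quick numerical spot-check of this $\mu\sqrt{p}$ Lipschitz constant, an illustration of ours with arbitrary synthetic vectors and constants, can be done as follows:

```python
import numpy as np

# Check |mu*||w||_1 - mu*||w'||_1| <= mu * sqrt(p) * ||w - w'||_2 on random vectors.
rng = np.random.default_rng(1)
mu, p = 0.3, 20
worst_ratio = 0.0
for _ in range(10_000):
    w, w_prime = rng.normal(size=p), rng.normal(size=p)
    lhs = mu * abs(np.linalg.norm(w, 1) - np.linalg.norm(w_prime, 1))
    rhs = mu * np.sqrt(p) * np.linalg.norm(w - w_prime)
    worst_ratio = max(worst_ratio, lhs / rhs)
print("max observed lhs/rhs:", worst_ratio, "(never exceeds 1)")
```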
The following theorem is our main result on the rate of convergence of inexact M-SPP for composite stochastic convex optimization with smooth losses.

Theorem 4. Suppose that Assumptions 1, 2 and 3 hold. Let $\rho \in (0, 1/4]$ be an arbitrary scalar and set $\gamma_t = \frac{\lambda\rho t}{4}$. Suppose that $n \ge \frac{76L}{\lambda\rho}$ and that $\epsilon_t \le \frac{\epsilon}{nt^4}$ for some $\epsilon \in [0, 1]$. Then for any $T \ge 1$, the weighted average output $\bar{w}_T = \frac{2}{T(T+1)}\sum_{t=1}^T t\, w_t$ of Algorithm 1 satisfies
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \lesssim \frac{\rho}{T^2}\left(R(w_0) - R^*\right) + \frac{L}{\lambda\rho nT} R^* + \frac{\sqrt{\epsilon}}{T^2}\left(\frac{L}{\lambda\rho} + G\sqrt{\frac{1}{\lambda\rho}}\right).$$

Proof. See Appendix B.1 for a proof of this result.

We would like to highlight that our perturbation analysis for smooth losses is considerably different from that of Wang et al. (2017b) developed for Lipschitz losses. This is mainly because in the smooth-loss case the change of the loss can no longer be upper bounded by the change of the prediction, and thus we need a more careful treatment of the perturbation caused by inexact minimization of the regularized minibatch empirical risk. We provide in order a few remarks on Theorem 4.

Remark 7. Theorem 4 suggests that the excess risk bound of exact M-SPP in Part (a) of Theorem 1 extends to its inexact version, provided that the inner-loop minibatch ERMs (2) are solved to sufficient accuracy, say, $\epsilon_t \le O\left(\frac{1}{nt^4}\right)$. Similarly, the result in Part (b) of Theorem 1 for arbitrary minibatch sizes can also be extended to inexact M-SPP, which is omitted to avoid redundancy. Since the inner-loop minibatch ERMs are strongly convex and the loss functions are smooth, on average the desired accuracy can be attained in logarithmic time $O\left(\log\left(\frac{1}{\epsilon_t}\right)\right)$ via variance-reduced SGD methods (Xiao and Zhang, 2014).

Remark 8. Analogous to the discussion at the end of Section 2.2, by specifying the choice of $\rho$ we can derive a direct consequence of Theorem 4 that shows more explicitly the rate of convergence with respect to $N = nT$. Also, for the two-phase M-SPP, in view of Theorem 4 we can show that the bound in Theorem 2 extends to the inexact setting if the minibatch optimization is sufficiently accurate. These extensions are more or less straightforward and thus are omitted.
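As a hypothetical illustration of the inexact regime discussed in Remark 7 (and not the solver used in the paper), each round's subproblem is strongly convex, so even plain gradient descent reaches a prescribed sub-optimality $\epsilon_t$ at a linear rate; a variance-reduced method would achieve the same accuracy with cheaper per-step cost. The sketch below, again in the squared-loss-plus-ridge setting assumed earlier, stops once a standard gradient-norm certificate guarantees $F_t(w) - \min_w F_t(w) \le \epsilon_t$.

```python
import numpy as np

def inexact_prox_step(X, y, w_prev, gamma, mu, eps_t, max_iter=10_000):
    """Approximately solve F_t(w) = 1/(2n)||Xw - y||^2 + (mu/2)||w||^2
                                     + (gamma/2)||w - w_prev||^2
    by gradient descent. F_t is (gamma + mu)-strongly convex, so
        F_t(w) - min F_t <= ||grad F_t(w)||^2 / (2*(gamma + mu))
    serves as a stopping certificate for the target sub-optimality eps_t."""
    n, p = X.shape
    smooth = np.linalg.norm(X, 2) ** 2 / n + mu + gamma  # smoothness of F_t
    lr = 1.0 / smooth
    w = w_prev.copy()
    for _ in range(max_iter):
        grad = X.T @ (X @ w - y) / n + mu * w + gamma * (w - w_prev)
        if np.dot(grad, grad) <= 2.0 * (gamma + mu) * eps_t:
            break
        w -= lr * grad
    return w
```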
In the following theorem, we provide an excess risk bound for inexact M-SPP when the composite risk $R$ is convex but does not necessarily have the quadratic growth property.

Theorem 5. Suppose that Assumptions 1 and 3 hold. Set $\gamma_t \equiv \gamma \ge \frac{19L}{n}$ and assume that $\epsilon_t \le \min\left\{\frac{\epsilon}{n^2t^5}, \frac{2G^2}{9n^2\gamma}\right\}$ for some $\epsilon \in [0, 1]$. Then the average output $\bar{w}_T = \frac{1}{T}\sum_{t=1}^T w_t$ of Algorithm 1 satisfies
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \lesssim \frac{\gamma}{T} D^2(w_0, W^*) + \frac{L}{\gamma n} R^* + \left(\frac{L}{\gamma n} + \frac{\gamma}{LnT} + \frac{G}{\sqrt{\gamma nT}}\right)\sqrt{\epsilon}.$$
In particular, for $\gamma = \sqrt{\frac{T}{n}} + \frac{19L}{n}$, it holds that
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \lesssim \left(\frac{1}{\sqrt{nT}} + \frac{L}{nT}\right) D^2(w_0, W^*) + \frac{L}{\sqrt{nT}} R^* + \left(\frac{L + G}{\sqrt{nT}} + \frac{1}{nT}\right)\sqrt{\epsilon}.$$

Proof. See Appendix B.2 for a proof of this result.

Remark 9. Theorem 5 confirms that the excess risk bounds established in Theorem 3 for exact M-SPP are tolerant to sufficiently small sub-optimality, $\epsilon_t \le O\left(\frac{1}{n^2t^5}\right)$, of the minibatch proximal ERM subproblems.

4 Performance Guarantees with High Probability

In the previous two sections, we analyzed the excess risk bounds of M-SPP in expectation. In this section, we move on to study high-probability guarantees of M-SPP with respect to the randomness of the training data, still under the notion of algorithmic stability. To this end, we first introduce a variant of M-SPP which carries out the proximal point update via sampling without replacement over the given data minibatches. We then show that the output of the proposed algorithm is uniformly stable in expectation over the randomness of the sampling. As the main result of this section, for strongly convex population risk we establish a near-optimal high-probability (with respect to the data) bound on the estimation error $\|\bar{w}_T - w^*\|$ that holds in expectation over the randomness of the inner data sampling.
Additionally, we provide a high-probability generalization bound for arbitrary convex losses.

4.1 Sampling Without Replacement M-SPP

Algorithm 3: Sampling Without Replacement M-SPP (M-SPP-SWoR)
Input: Dataset $S = \{S_t\}_{t=1}^T$ in which $S_t := \{z_{i,t}\}_{i=1}^n \overset{\text{i.i.d.}}{\sim} \mathcal{D}^n$, regularization moduli $\{\gamma_t > 0\}_{t\in[T]}$.
Output: $\bar{w}_T$ as a weighted average of $\{w_t\}_{1\le t\le T}$.
Initialization: Specify a value of $w_0$. Typically $w_0 = 0$.
for $t = 1, 2, \ldots, T$ do
  Uniformly randomly sample an index $\xi_t \in [T]$ without replacement.
  Estimate $w_t$ satisfying
  $$F_t(w_t) \le \min_{w\in W}\Big\{F_t(w) := R_{S_{\xi_t}}(w) + \frac{\gamma_t}{2}\|w - w_{t-1}\|^2\Big\} + \epsilon_t, \qquad (7)$$
  where $\epsilon_t \ge 0$ measures the sub-optimality.
end

Let us consider the M-SPP-SWoR (M-SPP via Sampling Without Replacement) procedure outlined in Algorithm 3. Given a set $S$ of $T$ data minibatches, at each iteration the algorithm uniformly randomly samples one minibatch from $S$ without replacement for the proximal update, so that after $T$ rounds of iteration all the minibatches have been used to update the model. Since this procedure is merely a random-shuffling variant of M-SPP as presented in Algorithm 1, all the in-expectation bounds established in the previous sections for M-SPP directly transfer to M-SPP-SWoR under any realization of the shuffling. As we will show in the next subsection, such a random shuffling scheme is beneficial for boosting the on-average algorithmic stability of M-SPP, which in turn leads to strong high-probability guarantees for M-SPP-SWoR.
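In code, the only change relative to the earlier M-SPP sketch is that the minibatch visited at round $t$ is drawn from a random permutation of $[T]$. A minimal rendering of ours, again reusing the hypothetical `prox_erm_step` helper and its squared-loss setting, is:

```python
import numpy as np

def mspp_swor(minibatches, gamma_schedule, mu, p, seed=0):
    """Algorithm 3 sketch: sampling without replacement is implemented by
    visiting the minibatches in a uniformly random permutation; each round
    performs the same (here: exact) proximal update as plain M-SPP."""
    rng = np.random.default_rng(seed)
    T = len(minibatches)
    order = rng.permutation(T)            # xi_1, ..., xi_T without replacement
    w = np.zeros(p)
    w_bar = np.zeros(p)
    for t, xi_t in enumerate(order, start=1):
        X_t, y_t = minibatches[xi_t]
        w = prox_erm_step(X_t, y_t, w, gamma_schedule(t), mu)
        w_bar += 2.0 * t / (T * (T + 1)) * w
    return w_bar
```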
4.2 A Uniform Stability Analysis

Let $S = \{S_t\}_{t\in[T]}$ and $S' = \{S'_t\}_{t\in[T]}$ be two sets of data minibatches. We write $S_t \doteq S'_t$ if $S_t$ and $S'_t$ differ in a single data point, and $S \doteq S'$ if $S$ and $S'$ differ in a single minibatch and in a single data point of that minibatch. We introduce the following notion of uniform stability of M-SPP, which substantiates the concept of uniform algorithmic stability serving as a powerful tool for analyzing generalization bounds of statistical estimators and their learning algorithms (Bousquet and Elisseeff, 2002; Hardt et al., 2016; Feldman and Vondrák, 2019).

Definition 1 (Uniform Stability of M-SPP). The M-SPP algorithm is said to be $\varrho$-uniformly stable with respect to a mapping $h : W \mapsto \mathbb{R}^q$ if $\|h(\bar{w}_T) - h(\bar{w}'_T)\| \le \varrho$ for any pair of data sets $S \doteq S'$.

The following result gives a uniform stability bound (with respect to the identity mapping) for the vanilla M-SPP (Algorithm 1) that holds deterministically, and a corresponding bound for M-SPP-SWoR (Algorithm 3) that holds in expectation over the randomness of the minibatch sampling.
Proposition 1. Suppose that Assumption 1 holds and that the loss function is bounded such that $0 \le \ell(y', y) \le M$ for all $y, y'$. Let $S = \{S_t\}_{t\in[T]}$ and $S' = \{S'_t\}_{t\in[T]}$ be two sets of data minibatches satisfying $S \doteq S'$. Then:

(a) The weighted average outputs $\bar{w}_T$ and $\bar{w}'_T$ generated by M-SPP (Algorithm 1) over $S$ and $S'$, respectively, satisfy
$$\sup_{S, S'} \|\bar{w}_T - \bar{w}'_T\| \le \frac{4\sqrt{2LM}}{n \min_{t\in[T]} \gamma_t} + \sum_{t=1}^T 2\sqrt{\frac{2\epsilon_t}{\gamma_t}}.$$

(b) The weighted average outputs $\bar{w}_T$ and $\bar{w}'_T$ generated by M-SPP-SWoR (Algorithm 3) over $S$ and $S'$, respectively, satisfy
$$\sup_{S, S'} \mathbb{E}_{\xi_{[T]}}\left[\|\bar{w}_T - \bar{w}'_T\|\right] \le \sum_{t=1}^T \left(\frac{4\sqrt{2LM}}{nT\gamma_t} + 2\sqrt{\frac{2\epsilon_t}{\gamma_t}}\right).$$

Proof. See Appendix C.1 for a proof.

Remark 10. Suppose that the sub-optimality levels $\{\epsilon_t\}_{t\in[T]}$ are sufficiently small. If we set $\gamma_t = O(t)$, as used for population risks with the quadratic growth property, then Proposition 1 shows that M-SPP is $O\left(\frac{1}{n}\right)$-uniformly stable, while in expectation over the randomness of the without-replacement sampling, M-SPP-SWoR has a much improved uniform stability parameter scaling as $O\left(\frac{\log(T)}{nT}\right)$. If we instead set $\gamma_t \equiv \sqrt{\frac{T}{n}}$, as used for generic convex losses, then M-SPP is $O\left(\frac{1}{\sqrt{nT}}\right)$-uniformly stable, and M-SPP-SWoR has an identical uniform stability parameter in expectation over the sampling.
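The $O\left(\frac{\log(T)}{nT}\right)$ claim for M-SPP-SWoR in Remark 10 comes from the harmonic sum $\sum_{t=1}^T \frac{1}{nT\gamma_t}$ with $\gamma_t \propto t$; the following throwaway computation of ours, with arbitrary constants, simply visualizes that scaling.

```python
import numpy as np

# With gamma_t = c * t, the leading stability term of Proposition 1(b) is
#   sum_t 4*sqrt(2LM) / (n*T*gamma_t) = (4*sqrt(2LM)/(c*n*T)) * H_T,
# where H_T = sum_{t=1}^T 1/t ~ log(T), hence the O(log(T)/(nT)) scaling.
c, n = 1.0, 64
for T in (10, 100, 1000, 10000):
    t = np.arange(1, T + 1)
    leading_sum = np.sum(1.0 / (n * T * c * t))
    print(T, leading_sum, np.log(T) / (n * T))  # the two columns track each other
```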
In the following theorem, based on the uniform stability bounds in Proposition 1, we derive an upper bound on the estimation error $D(\bar{w}_T, W^*)$ of M-SPP-SWoR that holds with high probability over the data distribution and in expectation over the random sampling of the minibatches.

Theorem 6. Suppose that Assumptions 1, 2 and 3 hold and that the loss function $\ell$ is bounded in the interval $(0, M]$. Let $\rho \in (0, 1/4]$ be an arbitrary scalar and set $\gamma_t = \frac{\lambda\rho t}{4}$. Suppose that $n \ge \frac{76L}{\lambda\rho}$ and that $\epsilon_t \le \min\left\{\frac{\epsilon}{nt^4}, \frac{LM}{\lambda\rho n^2T^2 t}\right\}$ for some $\epsilon \in [0, 1]$. Then with probability at least $1 - \delta$ over $S$, the weighted average output $\bar{w}_T$ of M-SPP-SWoR (Algorithm 3) satisfies
$$\mathbb{E}_{\xi_{[T]}}\left[D(\bar{w}_T, W^*)\right] \lesssim \frac{\sqrt{LM\log(1/\delta)}\,\log(T)}{\lambda\rho\sqrt{nT}} + \sqrt{\frac{\rho\left[R(w_0) - R^*\right]}{\lambda T^2} + \frac{L}{\lambda^2\rho nT} R^* + \frac{\sqrt{\epsilon}}{\lambda T^2}\left(\frac{L}{\lambda\rho} + G\sqrt{\frac{1}{\lambda\rho}}\right)}.$$

Proof. See Appendix C.2 for a proof of this result.

Remark 11. We comment on the optimality of the bound in Theorem 6. Consider $\rho = O(1)$. The first term, of scale $O\left(\frac{\sqrt{\log(1/\delta)}\,\log(T)}{\sqrt{nT}}\right)$, represents the overhead of obtaining generalization with high probability over the data. The second term matches the corresponding in-expectation estimation error bound of Theorem 4, which in turn matches the known optimal rates for strongly convex SGD (Rakhlin et al., 2012; Dieuleveut et al., 2017). In view of the minimax lower bounds for statistical estimation (Tsybakov, 2008), the estimation error bound established in Theorem 6 is near-optimal for strongly convex risk minimization.
Finally, we provide a high-probability generalization bound of M-SPP for arbitrary convex population risk functions.

Theorem 7. Suppose that Assumptions 1 and 3 hold and the loss function $\ell$ is bounded in the interval $[0, M]$. Set $\gamma_t \equiv \sqrt{\frac{T}{n}}$. Assume that $\epsilon_t \le \frac{LM}{4nT^2\sqrt{nT}}$. Then with probability at least $1 - \delta$ over $S$, the average output $\bar{w}_T = \frac{1}{T}\sum_{t=1}^{T} w_t$ of M-SPP (Algorithm 1) satisfies
$$\left|R(\bar{w}_T) - R_S(\bar{w}_T)\right| \lesssim \frac{\left(LM + G\sqrt{LM}\right)\log(N)\log(1/\delta)}{\sqrt{nT}} + M\sqrt{\frac{\log(1/\delta)}{nT}}.$$

Proof. See Appendix C.3 for a proof of this result.

We remark in passing that, using a similar uniform stability argument, the high-probability generalization bound in Theorem 7 can be shown to hold for convex and non-smooth loss functions as well. We omit the detailed analysis as it is beyond the scope of this paper, which focuses on smooth losses.

5 Comparison with Prior Methods

Comparison with M-SPP and SPP methods. The M-SPP algorithm considered in this article is a minibatch extension of the SPP methods. The convergence analysis of SPP has received wide recent attention in the stochastic optimization community. Specifically, for finite-sum optimization over $N$ data points, an incremental SPP method was proposed and analyzed in (Bertsekas, 2011). For learning with linear prediction models and strongly convex Lipschitz losses, Toulis et al. (2016) established a set of $O\left(\frac{1}{N^{\gamma}}\right)$ rates of convergence for SPP with suitable $\gamma \in (0.5, 1]$, where $N$ is the iteration counter. For arbitrary convex loss functions, the non-asymptotic convergence performance of SPP has been studied, with an $O\left(\frac{1}{\sqrt{N}}\right)$ rate obtained for Lipschitz losses (Patrascu and Necoara, 2017; Davis and Drusvyatskiy, 2019), an $O\left(\frac{1}{N}\right)$ rate for strongly convex and Lipschitz (Davis and Drusvyatskiy, 2019) or smooth (Patrascu and Necoara, 2017) losses, and an $O\left(\frac{\log(N)}{N}\right)$ rate for strongly convex non-smooth losses (Asi and Duchi, 2019b). Recently, it has been shown that the $O\left(\frac{\log(N)}{N}\right)$ rate also extends to M-SPP with strongly convex losses (Asi et al., 2020). The asymptotic and non-asymptotic behaviors of SPP for weakly convex losses (e.g., the composite of a convex loss with a smooth map) have been studied for stochastic optimization with (Duchi and Ruan, 2018) or without (Davis and Drusvyatskiy, 2019) composite structures. Among others, our work is most closely related to the minibatch proximal update method developed for communication-efficient distributed optimization (Wang et al., 2017b). Similarly from the viewpoint of algorithmic stability, $O\left(\frac{1}{N^{\gamma}}\right)$ rates were established for that method for Lipschitz losses with arbitrary convexity ($\gamma = 0.5$) or strong convexity ($\gamma = 1$). In comparison to these prior results, our convergence results for M-SPP are new in the following aspects:

The convergence rates are derived for smooth losses and they explicitly show the impact of the noise level of a statistical model, as encoded in $R^*$, on convergence performance, which has not been previously known for SPP-type methods.
The $O(N^{-1})$ fast rate attained in this article is valid for population risks with the quadratic growth property, without requiring each instantaneous loss to be strongly convex.

We provide a near-optimal model estimation error bound for a sampling-without-replacement variant of M-SPP that holds with high probability over the randomness of the data and in expectation over the randomness of sampling.

Comparison with SGD and ERM. Similar to those in Theorem 1 and Theorem 3, bias-variance composite rates have been known for accelerated SGD for least squares regression (Dieuleveut et al., 2017), and for minibatch SGD (M-SGD) for generic convex and smooth learning problems (Woodworth and Srebro, 2021). While the results are of a similar flavor, we arrive at them in a distinct algorithmic framework using quite different proof techniques. In particular, in contrast to Woodworth and Srebro (2021), our analysis neither uses knowledge of the model scale, which is typically inaccessible in real problems, nor relies on restarting arguments for strongly convex problems. Also for SGD with smooth loss functions, a fast rate of $O\left(\frac{1}{N}\right)$ has recently been established via stability theory in the ideally clean case where the optimal population risk is zero (Lei and Ying, 2020, Theorem 4). With $\gamma = O\left(\frac{1}{n}\right)$, the first bound of our Theorem 3 matches that bound in the context of M-SPP. For strongly convex problems, our results in Theorem 1 are stronger than (Lei and Ying, 2020, Theorem 12) in the sense that the former (ours) only require the population risk to have the quadratic growth property, while the latter requires the loss to be Lipschitz and the empirical risk to be strongly convex. Finally, for convex ERM, similar composite risk bounds have been established by Srebro et al. (2010); Zhang et al. (2017), under somewhat more stringent conditions such as a bounded domain of interest and a huge sample size with $N \gg p$.

Table 2 summarizes a comparison of the risk bounds obtained in this work to several prior ones for (M-)SPP, (M-)SGD and ERM.

Method | Literature | Risk Bound | Loss | $R$ | $R_S$
M-SPP | Asi et al. (2020) | $O\!\left(\frac{\log(N)}{N}\right)$ | s.cvx | — | —
M-SPP | Wang et al. (2017b) | $O\!\left(\frac{1}{N}\right)$ | Lip & s.cvx | — | —
M-SPP | Theorem 1 (our work) | $O\!\left(\frac{1}{T^2} + \frac{R^*}{N}\right)$ or $O\!\left(\frac{1}{T^2} + \frac{1+R^*}{N}\right)$ | sm & cvx | qg | —
M-SPP | Theorem 3 (our work) | $O\!\left(\frac{1}{N} + R^*\right)$ or $O\!\left(\frac{1+R^*}{\sqrt{N}}\right)$ | sm & cvx | — | —
SPP | Asi and Duchi (2019b) | $O\!\left(\frac{\log(N)}{N}\right)$ | s.cvx | — | —
SPP | Patrascu and Necoara (2017) | $O\!\left(\frac{1}{N}\right)$ | sm & s.cvx | — | —
SPP | Davis and Drusvyatskiy (2019) | $O\!\left(\frac{1}{N^2} + \frac{1}{N}\right)$ | Lip & s.cvx | — | —
M-SGD | Woodworth and Srebro (2021) | $O\!\left(\frac{1}{T^2} + \frac{1}{N} + \sqrt{\frac{R^*}{N}}\right)$ | sm & cvx | — | —
M-SGD | Woodworth and Srebro (2021) | $O\!\left(e^{-T} + \frac{R^*}{N}\right)$ | sm & cvx | qg | —
M-SGD | Dieuleveut et al. (2017) | $O\!\left(\frac{1}{N^2} + \frac{R^*}{N}\right)$ | quadratic | s.cvx | —
SGD | Lei and Ying (2020) | $O\!\left(\frac{1}{N} + R^*\right)$ or $O\!\left(\frac{1+R^*}{\sqrt{N}}\right)$ | sm & cvx | — | s.cvx
SGD | Rakhlin et al. (2012) | $O\!\left(\frac{1}{N}\right)$ | Lip & sm & cvx | s.cvx | —
ERM | Zhang et al. (2017) | $O\!\left(\frac{p}{N} + \frac{R^*}{N}\right)$, or $O\!\left(\frac{1}{N^2} + \frac{R^*}{N}\right)$ for $N \gtrsim p$ | sm & cvx | Lip & s.cvx | —
ERM | Srebro et al. (2010) | $O\!\left(\frac{1}{N} + \sqrt{\frac{R^*}{N}}\right)$ | sm & cvx | — | —

Table 2: Comparison of our risk bounds to some prior results for M-SPP and SPP as well as for SGD and ERM. Recall that $T$ is the iteration count and $N$ is the total number of samples accessed. All the listed bounds hold in expectation. Here we have used the following abbreviations: cvx (convex), s.cvx (strongly convex), Lip (Lipschitz continuous), sm (smooth), qg (quadratic growth).

6 Experiments

We carry out a set of numerical studies to demonstrate the convergence performance of minibatch stochastic proximal point methods in (composite) statistical learning problems, so as to answer the following three questions associated with the key theory and algorithms established in this article:

Question 1: How do the minibatch size and the noise level of a statistical learning model affect the convergence speed of M-SPP for smooth loss functions? This question is mainly about verifying Theorem 1 and Theorem 5, and it is answered through a simulation study on Lasso estimation in Section 6.1.

Question 2: Can the two-phase variant of M-SPP improve over M-SPP in the small-$T$-large-$n$ setting? The simulation results presented in Section 6.1 also answer this question, related to the verification of Theorem 2.

Question 3: How do the M-SPP(-TP) methods compare with M-SGD in convergence performance? The real-data experimental results on logistic regression tasks in Section 6.2 answer this question about algorithm comparison.
6.1 Simulation Study

We first provide a simulation study to verify our theoretical results for smooth losses when specialized to the widely used Lasso regression model (Wainwright, 2009), with quadratic loss function $\ell(f_w(x), y) = \frac{1}{2}(y - w^\top x)^2$ and $r(f_w) = \mu\|w\|_1$, where $\mu$ is the $\ell_1$-penalty modulus. Given a model parameter $\bar{w} \in \mathbb{R}^p$ and a feature point $x \in \mathbb{R}^p$ drawn from the standard Gaussian distribution $N(0, I_{p\times p})$, the response $y$ is generated according to a linear model $y = \bar{w}^\top x + \varepsilon$ with random Gaussian noise $\varepsilon \sim N(0, \sigma^2)$. In this case, the population risk function can be expressed in closed form as $R(w) = \frac{1}{2}\|w - \bar{w}\|^2 + \frac{\sigma^2}{2} + \mu\|w\|_1$. Given a set of $T$ random $n$-minibatches $\left\{S_t = \{x_{i,t}, y_{i,t}\}_{i\in[n]}\right\}_{t\in[T]}$ drawn from the above data distribution, we aim at evaluating the convergence performance of M-SPP towards the minimizer of $R$, which can be expressed as $w^* = (\bar{w} - \mu)_+ - (-\bar{w} - \mu)_+$, where $(\cdot)_+$ is an element-wise function that preserves the positive parts of a vector. We test with $p = 5000$ and $N = nT = 100p$, and consider a well-specified sparse regression model where the true parameter vector $\bar{w}$ is $\bar{k}$-sparse with $\bar{k} = 0.2p$ and its non-zero entries are sampled from a zero-mean Gaussian distribution. We set $\mu = 10^{-3}$ and initialize $w^{(0)} = 0$. The inner-loop minibatch proximal Lasso subproblems are optimized via a standard proximal gradient descent method, using either of the following two termination criteria: 1) the difference between consecutive objective values falls below $10^{-3}$, or 2) the iteration count reaches 1000.

Figure 1: Simulation study on Lasso regression: convergence performances of M-SPP and M-SPP-TP. Panels: (a) results under varying $T$; (b) results under varying $\sigma$; (c) M-SPP versus M-SPP-TP. The y-axis represents the logarithmic scale of the excess risk.
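To make the simulated setup concrete, the following is a minimal Python sketch of this pipeline. It is not the code used in the paper: it assumes NumPy, shrinks the problem dimensions for readability, and picks illustrative values for the quadratic-growth modulus $\lambda$ and the free scalar $\rho$ in the $\gamma_t = \frac{\lambda\rho t}{4}$ schedule.

```python
import numpy as np

def soft_threshold(v, tau):
    # Element-wise soft-thresholding: the proximal operator of tau * ||.||_1.
    return np.sign(v) * np.maximum(np.abs(v) - tau, 0.0)

def inner_prox_lasso(X, y, w_prev, gamma, mu, max_iter=1000, tol=1e-3):
    # Approximately solve the minibatch proximal Lasso subproblem
    #   min_w (1/(2n)) ||y - X w||^2 + mu ||w||_1 + (gamma/2) ||w - w_prev||^2
    # by proximal gradient descent (ISTA), with the two stopping rules described above.
    n = X.shape[0]
    w = w_prev.copy()
    step = 1.0 / (np.linalg.norm(X, 2) ** 2 / n + gamma)  # 1 / Lipschitz constant of the smooth part
    prev_obj = np.inf
    for _ in range(max_iter):
        grad = X.T @ (X @ w - y) / n + gamma * (w - w_prev)
        w = soft_threshold(w - step * grad, step * mu)
        obj = (0.5 * np.mean((y - X @ w) ** 2) + mu * np.sum(np.abs(w))
               + 0.5 * gamma * np.sum((w - w_prev) ** 2))
        if prev_obj - obj < tol:
            break
        prev_obj = obj
    return w

# Toy-sized instance of the simulated sparse regression model.
rng = np.random.default_rng(0)
p, n, T, sigma, mu = 200, 400, 20, 0.1, 1e-3
w_bar = np.zeros(p)
support = rng.choice(p, size=int(0.2 * p), replace=False)
w_bar[support] = rng.normal(size=support.size)
w_star = np.maximum(w_bar - mu, 0.0) - np.maximum(-w_bar - mu, 0.0)  # closed-form minimizer of R

def population_risk(w):
    # Closed-form R(w) = 0.5 ||w - w_bar||^2 + sigma^2 / 2 + mu ||w||_1, as derived above.
    return 0.5 * np.sum((w - w_bar) ** 2) + 0.5 * sigma ** 2 + mu * np.sum(np.abs(w))

lam, rho = 1.0, 0.5  # illustrative choices, not taken from the paper
w, iterates = np.zeros(p), []
for t in range(1, T + 1):
    X = rng.normal(size=(n, p))
    y = X @ w_bar + sigma * rng.normal(size=n)
    gamma_t = lam * rho * t / 4.0                 # gamma_t = lambda * rho * t / 4 schedule
    w = inner_prox_lasso(X, y, w, gamma_t, mu)
    iterates.append(w)
# Weighted average output of Algorithm 1: (2 / (T(T+1))) * sum_t t * w_t.
w_bar_T = sum((t + 1) * wt for t, wt in enumerate(iterates)) * 2.0 / (T * (T + 1))
print("excess risk:", population_risk(w_bar_T) - population_risk(w_star))
```

Sweeping this loop over different values of $T$ (with $N = nT$ held fixed) and of $\sigma$ reproduces, in miniature, the two experimental setups discussed next.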
The following two experimental setups are considered for theory verification:

We fix the noise level $\sigma = 0.1$ and study the impact of varying $T \in \{10, 20, 100, 500\}$ on the convergence of M-SPP. Figure 1(a) shows the evolving curves of the excess risk as functions of the sample size, in a semi-log layout with the y-axis representing the logarithmic scale of the excess risk. From this set of curves we observe a clear trend: in the early stage, M-SPP converges faster when the total number of minibatches is relatively large (say, $T \in \{20, 100\}$). This is consistent with the prediction of Theorem 1 about the impact of $T$ and $n$ on the convergence rates. In the final stage, relatively slower convergence is exhibited under relatively larger $T$ (say, $T \in \{100, 500\}$). This observation can be explained by the inexact analysis in Theorem 4, which shows that to guarantee the desired convergence rate, the inner-loop proximal ERM update needs to be extremely accurate when $T$ is relatively large. Therefore, the part of Question 1 concerning the impact of the minibatch size on the convergence rate is answered by this group of results. Also in this setup, we have compared M-SPP with its two-phase variant M-SPP-TP for $T \in \{5, 10\}$. The related results are shown in Figure 1(c) and indicate that M-SPP-TP significantly improves the convergence of M-SPP in the small-$T$-large-$n$ cases. This observation supports the result of Theorem 2 and answers Question 2 affirmatively.

We fix $T = 50$ and study the impact of varying the noise level $\sigma \in \{0.1, 1, 5\}$ on the convergence performance. The results are shown in Figure 1(b).
From this group of results we can see that faster convergence is attained at a relatively smaller noise level $\sigma$, while the speed becomes insensitive to the noise level when $\sigma$ is sufficiently small (say, $\sigma \le 1$). This is consistent with the prediction of Theorem 1, keeping in mind the fact that $R^* = \frac{1}{2}\|w^* - \bar{w}\|^2 + \frac{\sigma^2}{2} + \mu\|w^*\|_1 \le \|\bar{w}\|^2 + \frac{1}{2}\sigma^2$. The part of Question 1 concerning the impact of the noise level on convergence performance is answered by this group of results.

Figure 2: Real-data results on logistic regression: test error convergence comparison on gisette under varying minibatch size. Panels: (a) $n = N/5$; (b) $n = N/20$; (c) $n = N/100$.

6.2 Experiment on Real Data

We further compare our methods with M-SGD on binary prediction problems using the logistic loss $\ell(w^\top x, y) = \log(1 + \exp(-y w^\top x))$. Here the M-SGD method is implemented by an SGD solver from SGDLibrary (Kasai, 2017). For M-SPP and M-SPP-TP, the inner-loop minibatch proximal ERMs are solved by the same SGD solver applied with a fixed SGD batch size of 10 and a single epoch of data processing. We initialize $w^{(0)} = 0$ for all the considered methods.
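For concreteness, the following is a minimal Python sketch of a single M-SPP outer step for this logistic-loss setting. It is not the SGDLibrary-based implementation used in the experiments; the inner batch size of 10 and the single inner epoch mirror the description above, while the learning rate value is an illustrative assumption.

```python
import numpy as np

def logistic_loss_grad(w, X, y):
    # Gradient of the average logistic loss (1/m) * sum_i log(1 + exp(-y_i * w^T x_i)),
    # with labels y_i in {-1, +1}.
    margins = y * (X @ w)
    coeff = -y / (1.0 + np.exp(margins))
    return X.T @ coeff / X.shape[0]

def mspp_step_logistic(X_t, y_t, w_prev, gamma_t, lr=0.1, sgd_batch=10, seed=0):
    """One M-SPP outer update: approximately minimize
         (1/n) sum_i log(1 + exp(-y_i w^T x_i)) + (gamma_t / 2) ||w - w_prev||^2
       with a single epoch of minibatch SGD (inner batch size 10), as described above."""
    rng = np.random.default_rng(seed)
    n = X_t.shape[0]
    w = w_prev.copy()
    order = rng.permutation(n)
    for start in range(0, n, sgd_batch):
        idx = order[start:start + sgd_batch]
        grad = logistic_loss_grad(w, X_t[idx], y_t[idx]) + gamma_t * (w - w_prev)
        w -= lr * grad
    return w
```

An outer loop over the $T$ minibatches would then feed each returned iterate back in as `w_prev`, mirroring the M-SPP recursion in the Lasso sketch above.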
We use two public data sets for evaluation: the gisette data (Guyon et al., 2004) with $p = 5000$, $N = 6000$, and the covtype.binary data (Collobert et al., 2001) with $p = 54$, $N = 581{,}012$.¹ For each data set, we use half of the samples as the training set and the rest as the test set. We are interested in the impact of the minibatch size $n$ on the prediction performance of the model as measured by the test error. All the considered stochastic algorithms are executed with 10 epochs of data processing, and thus the overall number of minibatches is $T = N/n \times 10$. We replicate each experiment 10 times over random splits of the data and report the results as mean values along with error bars.

In Figure 2, we show the evolving curves (error bars shaded in color) of the test error with respect to the number of minibatches accessed on gisette, under varying minibatch size $n \in \{\frac{N}{5}, \frac{N}{20}, \frac{N}{100}\}$. From this set of curves we can observe the following:

Under the same minibatch size, M-SPP and M-SPP-TP converge faster and more stably than M-SGD, especially when the minibatch size is relatively large (see Figure 2(a)). This is as expected because when the minibatch size becomes large, M-SGD approaches the gradient descent method while M-SPP approaches ERM. This answers Question 3 raised at the beginning of the experiment section.

M-SPP-TP exhibits sharper convergence behavior than M-SPP at the early stage of iteration, especially when the minibatch size is relatively large. This is consistent with our theoretical results in Theorem 1 and Theorem 2.

¹ Both data sets are available at https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/.

Figure 3: Real-data results on logistic regression: test error convergence comparison on covtype.binary under varying minibatch size. Panels: (a) $n = N/20$; (b) $n = N/100$; (c) $n = N/1000$.

Figure 3 shows the corresponding results on covtype under $n \in \{\frac{N}{20}, \frac{N}{100}, \frac{N}{1000}\}$. From this set of results we once again see that M-SPP and M-SPP-TP consistently outperform M-SGD under the same minibatch size, and that M-SPP-TP converges faster than M-SPP under a relatively larger minibatch size (say, $n = \frac{N}{20}$).

7 Conclusions and Future Prospects

In this article, we presented an improved convergence analysis for minibatch stochastic proximal point methods with smooth and convex losses. Under the quadratic growth condition on the population risk, we showed that M-SPP with minibatch size $n$ and iteration count $T$ converges at a composite rate consisting of an $O\left(\frac{1}{T^2}\right)$ bias-decaying component and an $O\left(\frac{1}{N}\right)$ variance-decaying component.
In the small-$n$-large-$T$ case, this result substantially improves upon the prior relevant results for SPP-type approaches, which typically require each instantaneous loss to be Lipschitz and strongly convex. Complementarily, in the small-$T$-large-$n$ setting, we provide a two-phase acceleration of M-SPP which improves the $O\left(\frac{1}{T^2}\right)$ bias-decaying rate to $O\left(\frac{\log(N)}{N^2}\right)$. Perhaps the most interesting theoretical finding is that the (dominant) variance-decaying term has a factor dependence on the minimal value of the population risk, justifying the sharper convergence behavior of M-SPP in low-noise statistical settings, as backed up by our numerical evidence. In addition to the in-expectation risk bounds, we have also derived a near-optimal parameter estimation error bound for a random shuffling variant of M-SPP that holds with high probability over the data distribution and in expectation over the random shuffling. To conclude, our theory lays a novel and stronger foundation for understanding the convex M-SPP style algorithms that have gained significant recent attention, both in theory and practice, for large-scale machine learning (Li et al., 2014; Wang et al., 2017a; Asi et al., 2020).

There are several key prospects for future investigation of our theory: It remains open to derive near-optimal exponential excess risk bounds for M-SPP that apply to the (suffix) average or the last of the iterates over the training data. Inspired by the recent progress made towards understanding M-SPP with momentum acceleration (Deng and Gao, 2021; Chadha et al., 2022), it is interesting to provide momentum and weakly convex extensions of our theory for smooth loss functions. Last but not least, we expect that the theory developed in this article can be extended to the setup of non-parametric learning with minibatch stochastic proximal point methods.

Acknowledgements

The authors sincerely thank the anonymous referees for their constructive comments. The work of Xiao-Tong Yuan is also funded in part by the National Key Research and Development Program of China under Grant No. 2018AAA0100400, and in part by the Natural Science Foundation of China (NSFC) under Grants No. U21B2049, No. 61876090 and No. 61936005.
A Proofs for the Results in Section 2

In this section, we present the technical proofs for the main results stated in Section 2.

A.1 Proof of Theorem 1

Here we prove Theorem 1, which is restated below for convenience.

Theorem 1. Suppose that Assumptions 1 and 2 hold. Consider $\epsilon_t \equiv 0$ and the weighted average output $\bar{w}_T = \frac{2}{T(T+1)}\sum_{t=1}^{T} t\, w_t$ in Algorithm 1. Let $\rho \in (0, 0.5]$ be an arbitrary scalar.

(a) Suppose that $n \ge \frac{64L}{\lambda\rho}$. Set $\gamma_t = \frac{\lambda\rho t}{4}$ for $t \ge 1$. Then for any $T \ge 1$,
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \le \frac{4\rho\left[R(w_0) - R^*\right]}{T^2} + \frac{2^{9} L}{\lambda\rho nT}\, R^*.$$

(b) Set $\gamma_t = \frac{\lambda\rho t}{4} + \frac{16L}{n}$ for $t \ge 1$. Then for any $T \ge 1$,
$$\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \le \left(\frac{4\rho}{T^2} + \frac{2^{8} L}{\lambda nT}\right)\left[R(w_0) - R^*\right] + \left(\frac{2^{16} L^2}{\lambda^2\rho^2 n^2 T} + \frac{2^{9} L}{\lambda\rho nT}\right) R^*.$$

We first present the following lemma, which will be used in the proof. It can be viewed as a straightforward extension of the prior result (Wang et al., 2017b, Lemma 1) to the setup of composite minimization. A proof is included here for the sake of completeness.

Lemma 1. Assume that the loss function $\ell$ is convex with respect to its first argument and the regularization function $r$ is convex.
Then for any $w \in W$, we have
$$R_{S_t}(w_t) - R_{S_t}(w) \le \frac{\gamma_t}{2}\left( \|w - w_{t-1}\|^2 - \|w - w_t\|^2 - \|w_t - w_{t-1}\|^2 \right).$$

Proof. Since $\ell$ and $r$ are both convex, $R_{S_t}$ is convex over $W$. The optimality of $w_t$ implies that for any $w \in W$ and $\eta \in (0, 1)$,
$$\begin{aligned}
R_{S_t}(w_t) + \frac{\gamma_t}{2}\|w_t - w_{t-1}\|^2 &\le R_{S_t}\big((1-\eta)w_t + \eta w\big) + \frac{\gamma_t}{2}\|(1-\eta)w_t + \eta w - w_{t-1}\|^2 \\
&\le (1-\eta)R_{S_t}(w_t) + \eta R_{S_t}(w) + \frac{\gamma_t}{2}\left( (1-\eta)\|w_t - w_{t-1}\|^2 + \eta\|w - w_{t-1}\|^2 - \eta(1-\eta)\|w - w_t\|^2 \right),
\end{aligned}$$
where in the last inequality we have used the definition of the norm $\|\cdot\|$. Rearranging both sides of the above inequality yields
$$\eta\left( R_{S_t}(w_t) - R_{S_t}(w) \right) \le \frac{\eta\gamma_t}{2}\left( \|w - w_{t-1}\|^2 - (1-\eta)\|w - w_t\|^2 - \|w_t - w_{t-1}\|^2 \right),$$
which then implies (keeping in mind that $\eta > 0$)
$$R_{S_t}(w_t) - R_{S_t}(w) \le \frac{\gamma_t}{2}\left( \|w - w_{t-1}\|^2 - (1-\eta)\|w - w_t\|^2 - \|w_t - w_{t-1}\|^2 \right).$$
Letting $\eta \to 0^+$ in the above inequality yields the desired bound.

The following boundedness result for smooth functions is due to Srebro et al. (2010, Lemma 3.1).

Lemma 2. If $g$ is non-negative and $L$-smooth, then $\|\nabla g(w)\| \le \sqrt{2Lg(w)}$.
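Since Lemma 2 is invoked repeatedly below, we sketch the standard one-line argument here for the reader's convenience (this sketch is ours, not quoted from the cited source): by non-negativity and $L$-smoothness of $g$,
$$0 \le g\!\left(w - \tfrac{1}{L}\nabla g(w)\right) \le g(w) - \frac{1}{L}\|\nabla g(w)\|^2 + \frac{L}{2}\left\|\tfrac{1}{L}\nabla g(w)\right\|^2 = g(w) - \frac{\|\nabla g(w)\|^2}{2L},$$
and rearranging gives $\|\nabla g(w)\| \le \sqrt{2Lg(w)}$.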
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Set γt ≥ 16L n .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Then we have E [R(wt) − R∗ | Ft−1] ≤ γt � D2(wt−1, W ∗) − E � D2(wt, W ∗) | Ft−1 �� + 16L γtn R∗.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Let us consider a sample set S(i) t which is identical to St except that one of the zi,t is replaced by another random sample z′ i,t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Denote w(i) t = arg min w∈W � F (i) t (w) := RS(i) t (w) + γt 2 ∥w − wt−1∥2� , where RS(i) t (w) := 1 n �� j̸=i ℓ(w;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zj,t) + ℓ(w;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) � + r(w).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Then we can show that Ft(w(i) t ) − Ft(wt) = 1 n � j̸=i � ℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zj,t) − ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zj,t) � + 1 n � ℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) − ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) � + r(w(i) t ) − r(wt) + γt 2 ∥w(i) t − wt−1∥2 − γt 2 ∥wt − wt−1∥2 =F (i) t (w(i) t ) − F (i) t (wt) + 1 n � ℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) − ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) � − 1 n � ℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) − ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) � ≤ 1 n ���ℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) − ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) ��� + 1 n ���ℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) − ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) ��� ζ1≤ ∥∇ℓ(w(i) t ;' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t)∥ + ∥∇ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t)∥ n ∥w(i) t − wt∥ ζ2≤ � 2Lℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) + � 2Lℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) n ∥w(i) t − wt∥, where “ζ1” is due to the convexity of loss and in “ζ2”we have used Lemma 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' The bound in Lemma 1 implies Ft(w(i) t ) − Ft(wt) ≥ γt 2 ∥w(i) t − wt∥2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Combining the preceding two inequalities yields γt 2 ∥w(i) t − wt∥ ≤ � 2Lℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) + � 2Lℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) n , 27 which immediately gives ∥w(i) t − wt∥ ≤ 2 �� 2Lℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) + � 2Lℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) � γtn .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' (8) Let us now consider the following population risk and empirical risk over St with respect to the loss function ℓ: Rℓ(w) := E(x,y)∼D[ℓ(w;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z)], Rℓ St(w) := 1 n n � i=1 ℓ(w;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Since St and S(i) t are both i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' samples of the data distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' It follows that ESt � Rℓ(wt) | Ft−1 � = ESt∪{z′ i,t} � ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) | Ft−1 � =ES(i) t � Rℓ(w(i) t ) | Ft−1 � = ES(i) t ∪{zi,t} � ℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) | Ft−1 � .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Since the above holds for all i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=', n, we can further show that ESt � Rℓ(wt) | Ft−1 � = 1 n n � i=1 ES(i) t ∪{zi,t} � ℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) | Ft−1 � = 1 n n � i=1 ESt∪{z′ i,t} � ℓ(w(i) t ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) | Ft−1 � = 1 n n � i=1 ESt∪{z′ i,t} � ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) | Ft−1 � = 1 n n � i=1 ES(i) t ∪{zi,t} � ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' z′ i,t) | Ft−1 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' (9) Regarding the empirical case, we find that ESt � Rℓ St(wt) | Ft−1 � = 1 n n � i=1 ESt [ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) | Ft−1] = 1 n n � i=1 ESt∪{z′ i,t} [ℓ(wt;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' zi,t) | Ft−1] .' 
Combining the preceding two equalities gives that
\[
\big|E_{S_t}[R(w_t) - R_{S_t}(w_t) \mid \mathcal F_{t-1}]\big|
= \big|E_{S_t}[R^\ell(w_t) - R^\ell_{S_t}(w_t) \mid \mathcal F_{t-1}]\big|
= \Big|\frac1n\sum_{i=1}^n E_{S_t\cup\{z'_{i,t}\}}\big[\ell(w_t^{(i)}; z_{i,t}) - \ell(w_t; z_{i,t}) \mid \mathcal F_{t-1}\big]\Big|
\le \frac1n\sum_{i=1}^n E_{S_t\cup\{z'_{i,t}\}}\big[\big|\ell(w_t^{(i)}; z_{i,t}) - \ell(w_t; z_{i,t})\big| \mid \mathcal F_{t-1}\big]
\le \frac1n\sum_{i=1}^n E_{S_t\cup\{z'_{i,t}\}}\big[\sqrt{2L\,\ell(w_t^{(i)}; z_{i,t})}\,\|w_t^{(i)} - w_t\| \mid \mathcal F_{t-1}\big]
\overset{(8)}{\le} \frac1n\sum_{i=1}^n E_{S_t^{(i)}\cup\{z_{i,t}\}}\Big[\frac{4L\,\ell(w_t^{(i)}; z_{i,t})}{\gamma_t n} + \frac{4L\sqrt{\ell(w_t^{(i)}; z_{i,t})\,\ell(w_t; z'_{i,t})}}{\gamma_t n} \mid \mathcal F_{t-1}\Big]
\overset{\zeta_1}{\le} \frac{L}{\gamma_t n}\cdot\frac1n\sum_{i=1}^n E_{S_t\cup\{z'_{i,t}\}}\big[6\,\ell(w_t^{(i)}; z_{i,t}) + 2\,\ell(w_t; z'_{i,t}) \mid \mathcal F_{t-1}\big]
\overset{(9)}{=} \frac{8L}{\gamma_t n} E_{S_t}\big[R^\ell(w_t) \mid \mathcal F_{t-1}\big]
\le \frac{8L}{\gamma_t n} E_{S_t}\big[R(w_t) \mid \mathcal F_{t-1}\big],
\]
where in $\zeta_1$ we have used the fact $a^2 + b^2 \ge 2ab$ and the last inequality is due to the fact $r \ge 0$. Let us now denote $w^*_t = \arg\min_{w\in W^*}\|w - w_t\|$. Conditioned on $\mathcal F_{t-1}$, taking expectation on both sides of the bound in Lemma 1 for $w = w^*_{t-1}$ yields
\[
E_{S_t}\big[R_{S_t}(w_t) - R^* \mid \mathcal F_{t-1}\big]
\le \frac{\gamma_t}{2} E_{S_t}\big[\|w^*_{t-1} - w_{t-1}\|^2 - \|w^*_{t-1} - w_t\|^2 - \|w_t - w_{t-1}\|^2 \mid \mathcal F_{t-1}\big]
\le \frac{\gamma_t}{2}\big(\|w^*_{t-1} - w_{t-1}\|^2 - E_{S_t}\big[\|w^*_t - w_t\|^2 \mid \mathcal F_{t-1}\big]\big).
\]
Combining the preceding two inequalities yields
\[
E_{S_t}\big[R(w_t) - R^* \mid \mathcal F_{t-1}\big]
= E_{S_t}\big[R(w_t) - R_{S_t}(w_t) + R_{S_t}(w_t) - R^* \mid \mathcal F_{t-1}\big]
\le \big|E_{S_t}[R(w_t) - R_{S_t}(w_t) \mid \mathcal F_{t-1}]\big| + E_{S_t}\big[R_{S_t}(w_t) - R^* \mid \mathcal F_{t-1}\big]
\le \frac{\gamma_t}{2}\big(\|w^*_{t-1} - w_{t-1}\|^2 - E_{S_t}[\|w^*_t - w_t\|^2 \mid \mathcal F_{t-1}]\big) + \frac{8L}{\gamma_t n} E_{S_t}\big[R(w_t) \mid \mathcal F_{t-1}\big]
= \frac{\gamma_t}{2}\big(\|w^*_{t-1} - w_{t-1}\|^2 - E_{S_t}[\|w^*_t - w_t\|^2 \mid \mathcal F_{t-1}]\big) + \frac{8L}{\gamma_t n} E_{S_t}\big[R(w_t) - R^* \mid \mathcal F_{t-1}\big] + \frac{8L}{\gamma_t n} R^*
\le \frac{\gamma_t}{2}\big(\|w^*_{t-1} - w_{t-1}\|^2 - E_{S_t}[\|w^*_t - w_t\|^2 \mid \mathcal F_{t-1}]\big) + \frac12 E_{S_t}\big[R(w_t) - R^* \mid \mathcal F_{t-1}\big] + \frac{8L}{\gamma_t n} R^*,
\]
where in the last inequality we have used the condition $\gamma_t \ge \frac{52L}{n}$. After rearranging the terms in the above inequality we obtain
\[
E_{S_t}\big[R(w_t) - R^* \mid \mathcal F_{t-1}\big]
\le \gamma_t\big(\|w^*_{t-1} - w_{t-1}\|^2 - E_{S_t}[\|w^*_t - w_t\|^2 \mid \mathcal F_{t-1}]\big) + \frac{16L}{\gamma_t n} R^*
= \gamma_t\big(D^2(w_{t-1}, W^*) - E_{S_t}[D^2(w_t, W^*) \mid \mathcal F_{t-1}]\big) + \frac{16L}{\gamma_t n} R^*.
\]
This implies the desired bound.
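Every bound above is driven by the minibatch proximal step $w_t = \arg\min_w F_t(w)$ with $F_t(w) = R_{S_t}(w) + \frac{\gamma_t}{2}\|w - w_{t-1}\|^2$. The following is a minimal Python sketch of that step for the special case of squared loss without a regularizer, where the minimizer is available in closed form; the synthetic data, dimensions, and schedule constants are illustrative assumptions, not the authors' setup.

```python
# Minimal sketch (not the paper's code): one M-SPP step for squared loss,
#   F_t(w) = (1/n) * sum_i 0.5*(x_i @ w - y_i)**2 + (gamma_t/2) * ||w - w_prev||^2,
# whose minimizer solves (X^T X / n + gamma_t I) w = X^T y / n + gamma_t * w_prev.
import numpy as np

def mspp_step(X, y, w_prev, gamma_t):
    n, d = X.shape
    H = X.T @ X / n + gamma_t * np.eye(d)      # Hessian of F_t (strongly convex for gamma_t > 0)
    g = X.T @ y / n + gamma_t * w_prev         # stationarity condition H w = g
    return np.linalg.solve(H, g)

rng = np.random.default_rng(0)
d, n, T, lam, rho = 5, 64, 50, 1.0, 0.25       # assumed constants for illustration
w_true = rng.normal(size=d)
w = np.zeros(d)
for t in range(1, T + 1):
    X = rng.normal(size=(n, d))
    y = X @ w_true + 0.1 * rng.normal(size=n)
    w = mspp_step(X, y, w, gamma_t=lam * rho * t / 4)   # schedule gamma_t = lam*rho*t/4, as in Theorem 1
print(float(np.linalg.norm(w - w_true)))
```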
The following lemma is a direct consequence of Lemma 3.

Lemma 4. Suppose that Assumption 1 holds. Set $\gamma_t \ge \frac{16L}{n}$. Then the following holds for all $t \ge 1$:
\[
E\big[D^2(w_t, W^*)\big] \le D^2(w_0, W^*) + \sum_{\tau=1}^{t} \frac{16L}{\gamma_\tau^2 n} R^*.
\]
Proof. Since $R(w_t) \ge R^*$ and $\gamma_t \ge \frac{52L}{n}$, the bound in Lemma 3 immediately implies that
\[
E_{S_t}\big[D^2(w_t, W^*) \mid \mathcal F_{t-1}\big] \le D^2(w_{t-1}, W^*) + \frac{16L}{\gamma_t^2 n} R^*. \quad (10)
\]
By unfolding the above recursion from time instance $t$ down to zero we obtain that for all $t \ge 1$,
\[
E\big[D^2(w_t, W^*)\big] \le D^2(w_0, W^*) + \sum_{\tau=1}^{t} \frac{16L}{\gamma_\tau^2 n} R^*.
\]
This proves the desired bound.
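To see why the accumulated term in Lemma 4 stays bounded under the increasing schedule $\gamma_t = \frac{\lambda\rho t}{4}$ used below, note that it reduces to a convergent $\sum_\tau \tau^{-2}$ series. The short sketch below checks this numerically against the $\frac{2^9 L}{\lambda^2\rho^2 n}R^*$ bound appearing in (12); all constants are arbitrary placeholders.

```python
# Numeric check (placeholder constants): with gamma_t = lam*rho*t/4, the Lemma 4 term
#   sum_t 16*L / (gamma_t**2 * n) * R_star = 256*L/(lam^2 rho^2 n) * R_star * sum_t 1/t^2
# is bounded since sum_t 1/t^2 < pi^2/6 < 2.
lam, rho, L, n, R_star = 1.0, 0.25, 2.0, 256, 0.1
accumulated = sum(16 * L / ((lam * rho * t / 4) ** 2 * n) * R_star for t in range(1, 100_000))
bound = 2 ** 9 * L / (lam ** 2 * rho ** 2 * n) * R_star
print(accumulated, bound, accumulated <= bound)      # prints True
```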
With all these lemmas in place, we are now ready to prove the main result in Theorem 1.

Proof of Theorem 1. Part (a): Note that the condition on $n$ implies $\gamma_t = \frac{\lambda\rho t}{4} \ge \frac{\lambda\rho}{4} \ge \frac{16L}{n}$. Applying Lemma 3 along with the condition $R(w_t) - R^* \ge \frac{\lambda}{2} D^2(w_t, W^*)$ yields
\[
(1-\rho)\,E[R(w_t) - R^* \mid \mathcal F_{t-1}]
\le \gamma_t D^2(w_{t-1}, W^*) - \Big(\gamma_t + \frac{\lambda\rho}{2}\Big) E[D^2(w_t, W^*) \mid \mathcal F_{t-1}] + \frac{2^4 L}{\gamma_t n} R^*
\le \frac{\lambda\rho t}{4} D^2(w_{t-1}, W^*) - \frac{\lambda\rho(t+2)}{4} E[D^2(w_t, W^*) \mid \mathcal F_{t-1}] + \frac{2^6 L}{\lambda\rho t n} R^*
\le \frac{\lambda\rho t}{4} D^2(w_{t-1}, W^*) - \frac{\lambda\rho(t+2)}{4} E[D^2(w_t, W^*) \mid \mathcal F_{t-1}] + \frac{2^7 L}{\lambda\rho (t+1) n} R^*,
\]
where in the last inequality we have used $\frac1t \le \frac{2}{t+1}$ for $t \ge 1$. The above inequality implies
\[
t\,E[R(w_t) - R^* \mid \mathcal F_{t-1}] \le (t+1)\,E[R(w_t) - R^* \mid \mathcal F_{t-1}]
\le \frac{\lambda\rho t(t+1)}{4(1-\rho)} D^2(w_{t-1}, W^*) - \frac{\lambda\rho (t+1)(t+2)}{4(1-\rho)} E[D^2(w_t, W^*) \mid \mathcal F_{t-1}] + \frac{2^7 L}{\lambda n\rho(1-\rho)} R^*.
\]
Then based on the law of total expectation and after proper rearrangement we obtain
\[
t\,E[R(w_t) - R^*] \le \frac{\lambda\rho t(t+1)}{4(1-\rho)} E[D^2(w_{t-1}, W^*)] - \frac{\lambda\rho(t+1)(t+2)}{4(1-\rho)} E[D^2(w_t, W^*)] + \frac{2^7 L}{\lambda n\rho(1-\rho)} R^*. \quad (11)
\]
By summing the above inequality over $t = 1,\dots,T$ and after normalization we obtain
\[
\frac{2}{T(T+1)}\sum_{t=1}^{T} t\,E[R(w_t) - R^*]
\le \frac{\lambda\rho}{T(T+1)(1-\rho)} D^2(w_0, W^*) + \frac{2^8 L}{\lambda\rho(1-\rho)(T+1)n} R^*
\le \frac{2\lambda\rho}{T(T+1)} D^2(w_0, W^*) + \frac{2^9 L}{\lambda\rho(T+1)n} R^*,
\]
where in the last inequality we have used $\rho \le 0.5$. Consider the weighted output $\bar w_T = \frac{2}{T(T+1)}\sum_{t=1}^T t\,w_t$. In view of the above inequality and the convexity and quadratic growth property of the risk function $R$, we have
\[
E[R(\bar w_T) - R^*] \le \frac{4\rho\,[R(w_0) - R^*]}{T(T+1)} + \frac{2^9 L}{\lambda\rho n(T+1)} R^*,
\]
which then implies the desired bound in part (a).

Part (b): Note that $\gamma_t = \frac{\lambda\rho t}{4} + \frac{16L}{n} \ge \frac{16L}{n}$ for all $t \ge 1$. According to Lemma 4, the following holds for all $t \ge 1$:
\[
E[D^2(w_t, W^*)] \le D^2(w_0, W^*) + \sum_{\tau=1}^{t} \frac{16L}{\gamma_\tau^2 n} R^*
\le D^2(w_0, W^*) + \frac{2^8 L}{\lambda^2\rho^2 n} R^* \sum_{\tau=1}^{t}\frac{1}{\tau^2}
\le D^2(w_0, W^*) + \frac{2^9 L}{\lambda^2\rho^2 n} R^*. \quad (12)
\]
Similar to the argument in part (a), applying Lemma 3 along with the quadratic growth condition $R(w_t) - R^* \ge \frac{\lambda}{2} D^2(w_t, W^*)$ and $\rho \le 0.5$ yields
\[
\frac12 E[R(w_t) - R^* \mid \mathcal F_{t-1}] \le (1-\rho)\,E[R(w_t) - R^* \mid \mathcal F_{t-1}]
\le \gamma_t D^2(w_{t-1}, W^*) - \Big(\gamma_t + \frac{\lambda\rho}{2}\Big) E[D^2(w_t, W^*) \mid \mathcal F_{t-1}] + \frac{2^4 L}{\gamma_t n} R^*
\le \frac{\lambda\rho t}{4} D^2(w_{t-1}, W^*) - \frac{\lambda\rho(t+2)}{4} E[D^2(w_t, W^*) \mid \mathcal F_{t-1}] + \frac{16L}{n}\big(D^2(w_{t-1}, W^*) - E[D^2(w_t, W^*) \mid \mathcal F_{t-1}]\big) + \frac{2^6 L}{\lambda\rho t n} R^*,
\]
where in the second inequality we have used $\gamma_t \ge \frac{52L}{n}$, and in the last inequality we have used $\gamma_t \ge \frac{\lambda\rho t}{4}$.
Then based on the law of total expectation and after proper rearrangement we have
\[
E[R(w_t) - R^*] \le \frac{\lambda\rho t}{2} E[D^2(w_{t-1}, W^*)] - \frac{\lambda\rho(t+2)}{2} E[D^2(w_t, W^*)] + \frac{2^5 L}{n}\big(E[D^2(w_{t-1}, W^*)] - E[D^2(w_t, W^*)]\big) + \frac{2^7 L}{\lambda t n\rho} R^*,
\]
which implies that
\[
t\,E[R(w_t) - R^*] \le (t+1)\,E[R(w_t) - R^*]
\le \frac{\lambda\rho t(t+1)}{2} E[D^2(w_{t-1}, W^*)] - \frac{\lambda\rho(t+1)(t+2)}{2} E[D^2(w_t, W^*)] + \frac{2^5 L(t+1)}{n}\big(E[D^2(w_{t-1}, W^*)] - E[D^2(w_t, W^*)]\big) + \frac{2^7 L(t+1)}{\lambda t n\rho} R^*
\le \frac{\lambda\rho t(t+1)}{2} E[D^2(w_{t-1}, W^*)] - \frac{\lambda\rho(t+1)(t+2)}{2} E[D^2(w_t, W^*)] + \frac{2^6 L t}{n}\big(E[D^2(w_{t-1}, W^*)] - E[D^2(w_t, W^*)]\big) + \frac{2^8 L}{\lambda n\rho} R^*,
\]
where in the last inequality we have used the fact $t+1 \le 2t$ for $t \ge 1$. By summing the above inequality over $t = 1,\dots,T$ and after normalization we obtain
\[
\frac{2}{T(T+1)}\sum_{t=1}^{T} t\,E[R(w_t) - R^*]
\le \frac{2\lambda\rho}{T(T+1)} D^2(w_0, W^*) + \frac{2^7 L}{n T(T+1)}\sum_{t=1}^{T} E[D^2(w_{t-1}, W^*)] + \frac{2^9 L}{\lambda\rho(T+1)n} R^*
\le \frac{2\lambda\rho}{T(T+1)} D^2(w_0, W^*) + \frac{2^7 L}{n T(T+1)}\sum_{t=1}^{T}\Big(D^2(w_0, W^*) + \frac{2^9 L}{\lambda^2\rho^2 n} R^*\Big) + \frac{2^9 L}{\lambda\rho(T+1)n} R^*
= \Big(\frac{2\lambda\rho}{T(T+1)} + \frac{2^7 L}{n(T+1)}\Big) D^2(w_0, W^*) + \Big(\frac{2^{16} L^2}{\lambda^2\rho^2 n^2(T+1)} + \frac{2^9 L}{\lambda\rho n(T+1)}\Big) R^*,
\]
where in the last inequality we have used (12).
Using the convexity and quadratic growth property in the above inequality yields
\[
E[R(\bar w_T) - R^*] \le \Big(\frac{4\rho}{T(T+1)} + \frac{2^8 L}{\lambda n(T+1)}\Big)[R(w_0) - R^*] + \Big(\frac{2^{16} L^2}{\lambda^2\rho^2 n^2(T+1)} + \frac{2^9 L}{\lambda\rho n(T+1)}\Big) R^*,
\]
which then implies the desired bound in part (b). The proof is concluded.
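The weighted output $\bar w_T = \frac{2}{T(T+1)}\sum_{t=1}^T t\,w_t$ used throughout the proof of Theorem 1 is simply a convex combination with weights proportional to $t$. A small sketch of how one might form it from stored iterates (NumPy and the helper name are our illustrative choices, not the paper's):

```python
import numpy as np

def weighted_output(iterates):
    """bar_w_T = 2/(T(T+1)) * sum_{t=1}^T t * w_t ; the weights t * 2/(T(T+1)) sum to one."""
    W = np.asarray(iterates, dtype=float)          # shape (T, d)
    T = W.shape[0]
    t = np.arange(1, T + 1, dtype=float)
    return (t[:, None] * W).sum(axis=0) * 2.0 / (T * (T + 1))

# Tiny usage example with made-up iterates.
print(weighted_output([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]]))   # -> [1.3333..., 0.3333...]
```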
A.2 Proof of Theorem 2

In this subsection we prove Theorem 2, which is restated below.

Theorem 2. Suppose that Assumptions 1 and 2 hold. Consider $\epsilon_t \equiv 0$ for implementing M-SPP in both Phase-I and Phase-II of Algorithm 2, and consider the weighted average output $\bar w_T = \frac{2}{(T-1)(T+2)}\sum_{t=2}^{T} t\,w_t$ in Phase-II.

(a) Suppose that $n \ge \frac{128L}{\lambda}$. Set $m = \frac{128L}{\lambda}$ in Phase-I and $\gamma_t = \frac{\lambda t}{8}$ for implementing M-SPP in both Phase-I and Phase-II. Then for any $T \ge 2$, $\bar w_T$ satisfies
\[
E[R(\bar w_T) - R^*] \lesssim \frac{L^2\,[R(w_0) - R^*]}{\lambda^2 n^2 T^2} + \frac{L}{\lambda n T} R^*.
\]
(b) Set $m = O(1)$ in Phase-I and $\gamma_t = \frac{\lambda t}{8} + \frac{16L}{n}$ for implementing M-SPP in both Phase-I and Phase-II. Then for any $T \ge 2$, $\bar w_T$ satisfies
\[
E[R(\bar w_T) - R^*] \lesssim \frac{L^2\,[R(w_0) - R^*]}{\lambda^2 n T} + \frac{L^3}{\lambda^3 n T} R^*.
\]
Proof. Part (a): In Phase-I, by invoking the first part of Theorem 1 with $\rho = 1/2$ and $T = n/m \ge 1$ (with slight abuse of notation) we immediately get
\[
E_{S_1}[R(w_1) - R^*] \le \frac{2m^2\,[R(w_0) - R^*]}{n^2} + \frac{2^{10} L}{\lambda n} R^*. \quad (13)
\]
In Phase-II, conditioned on $\mathcal F_1$, summing the recursion (11) over $t = 2,\dots,T$ with $\rho = 1/2$ and proper normalization yields
\[
\frac{2}{(T-1)(T+2)}\sum_{t=2}^{T} t\,E_{S_{2:t}}[R(w_t) - R^* \mid \mathcal F_1]
\le \frac{3\lambda\,D^2(w_1, W^*)}{(T-1)(T+2)} + \frac{2^{10} L}{\lambda n(T+2)} R^*
\le \frac{6\,(R(w_1) - R^*)}{(T-1)(T+2)} + \frac{2^{10} L}{\lambda n(T+2)} R^*,
\]
where in the last inequality we have used the quadratic growth property. Consider the weighted average output $\bar w_T = \frac{2}{(T-1)(T+2)}\sum_{t=2}^{T} t\,w_t$. Based on the above inequality and the law of total expectation we must have
\[
E[R(\bar w_T) - R^*] \le \frac{6\,E_{S_1}[R(w_1) - R^*]}{(T-1)(T+2)} + \frac{2^{10} L}{\lambda n(T+2)} R^*
\le \frac{6\,E_{S_1}[R(w_1) - R^*]}{T^2} + \frac{2^{10}\cdot 2L}{\lambda n T} R^*
\le \frac{12 m^2\,[R(w_0) - R^*]}{n^2 T^2} + \frac{2^{13} L}{\lambda n T} R^*
\le \frac{2^{22} L^2\,[R(w_0) - R^*]}{\lambda^2 n^2 T^2} + \frac{2^{13} L}{\lambda n T} R^*,
\]
where we have used the fact $T \ge 2$ in multiple places, and in the last but one step we have used (13). This immediately implies the desired bound in Part (a).

Part (b): In Phase-I, by applying the second part of Theorem 1 (with $\rho = 1/2$ and $T = n/m \ge 1$) and preserving the leading terms we obtain
\[
E_{S_1}[R(w_1) - R^*] \lesssim \Big(\frac{m^2}{n^2} + \frac{L}{\lambda n}\Big)[R(w_0) - R^*] + \Big(\frac{L^2}{\lambda^2 m n} + \frac{L}{\lambda n}\Big) R^*
\lesssim \frac{L}{\lambda n}[R(w_0) - R^*] + \frac{L^2}{\lambda^2 n} R^*. \quad (14)
\]
In Phase-II, based on the proof argument of part (b) of Theorem 1 we can show that the weighted average output $\bar w_T = \frac{2}{(T-1)(T+2)}\sum_{t=2}^{T} t\,w_t$ satisfies
\[
E[R(\bar w_T) - R^*] \lesssim \Big(\frac{1}{T^2} + \frac{L}{\lambda n T}\Big) E_{S_1}[R(w_1) - R^*] + \Big(\frac{L^2}{\lambda^2 n^2 T} + \frac{L}{\lambda n T}\Big) R^*
\lesssim \Big(\frac{L}{\lambda n T^2} + \frac{L^2}{\lambda^2 n^2 T}\Big)[R(w_0) - R^*] + \Big(\frac{L^3}{\lambda^3 n^2 T} + \frac{L^2}{\lambda^2 n T}\Big) R^*
\lesssim \frac{L^2}{\lambda^2 n T}[R(w_0) - R^*] + \frac{L^3}{\lambda^3 n T} R^*,
\]
where in the second step we have used (14). This proves the desired bound in Part (b).
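The two settings of Theorem 2 differ only in the Phase-I minibatch size $m$ and in whether the modulus schedule carries the additive $\frac{16L}{n}$ term. A small sketch of the two schedules (placeholder constants; the function names are ours, not the paper's):

```python
def gamma_part_a(t, lam):
    # Theorem 2(a): gamma_t = lam * t / 8, paired with Phase-I minibatch size m = 128 L / lam.
    return lam * t / 8.0

def gamma_part_b(t, lam, L, n):
    # Theorem 2(b): gamma_t = lam * t / 8 + 16 L / n, paired with Phase-I minibatch size m = O(1).
    return lam * t / 8.0 + 16.0 * L / n

lam, L, n = 1.0, 2.0, 256   # placeholder constants
print([gamma_part_a(t, lam) for t in (1, 2, 3)])
print([gamma_part_b(t, lam, L, n) for t in (1, 2, 3)])
```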
A.3 Proof of Theorem 3

In this subsection, we prove Theorem 3, which is restated below.

Theorem 3. Suppose that Assumption 1 holds. Set $\gamma_t \equiv \gamma \ge \frac{16L}{n}$ and let $\bar w_T = \frac1T\sum_{t=1}^T w_t$ be the average output of Algorithm 1. Then
\[
E[R(\bar w_T) - R^*] \lesssim \frac{\gamma}{T} D^2(w_0, W^*) + \frac{L}{\gamma n} R^*.
\]
In particular, for $\gamma = \sqrt{\frac{T}{n}} + \frac{16L}{n}$ it holds that
\[
E[R(\bar w_T) - R^*] \lesssim \Big(\frac{1}{\sqrt{nT}} + \frac{L}{nT}\Big) D^2(w_0, W^*) + \frac{L}{\sqrt{nT}} R^*.
\]
Proof. Since $\gamma_t \equiv \gamma \ge \frac{16L}{n}$, the bound in Lemma 3 is valid. Based on the law of total expectation and by summing that inequality over $t = 1,\dots,T$ with proper normalization we obtain
\[
\frac1T\sum_{t=1}^T E[R(w_t) - R^*] \le \frac{\gamma}{T} D^2(w_0, W^*) + \frac{16L}{\gamma n} R^*.
\]
Consider $\bar w_T = \frac1T\sum_{t=1}^T w_t$. In view of the above inequality and the convexity of $R$ we have
\[
E[R(\bar w_T) - R^*] \le \frac{\gamma}{T} D^2(w_0, W^*) + \frac{16L}{\gamma n} R^*.
\]
This proves the first desired bound. The second bound follows immediately by substituting $\gamma = \sqrt{\frac{T}{n}} + \frac{16L}{n} > \frac{16L}{n}$ into the above bound. The proof is concluded.
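The constant-modulus choice in Theorem 3 balances the two terms of the first bound: $\gamma/T$ multiplies the initialization error while $L/(\gamma n)$ multiplies $R^*$, and $\gamma = \sqrt{T/n} + 16L/n$ equalizes them at the $1/\sqrt{nT}$ scale. A quick numeric sketch with placeholder constants:

```python
import math

def theorem3_bound(gamma, T, n, L, D0_sq, R_star):
    # First bound of Theorem 3 (up to the absolute constant hidden by the asymptotic notation).
    return gamma / T * D0_sq + 16 * L / (gamma * n) * R_star

L, n, T, D0_sq, R_star = 2.0, 256, 4096, 1.0, 0.05    # placeholder values
gamma = math.sqrt(T / n) + 16 * L / n                 # the choice suggested by Theorem 3
print(gamma, theorem3_bound(gamma, T, n, L, D0_sq, R_star))
# With this choice both terms scale like 1/sqrt(n*T).
```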
A.4 On the (Iteration) Stability of M-SPP

In this appendix subsection, we further provide a sensitivity analysis of M-SPP with respect to the choice of the regularization modulus $\{\gamma_t\}_{t\ge1}$, under the following notion of iteration stability essentially introduced by Asi and Duchi (2019a,b).

Definition 2. A stochastic optimization algorithm generating iterates $\{w_t\}_{t\ge1}$ for minimizing the population risk $R(w)$ is said to be stable if $\sup_{t\ge1} D(w_t, W^*) < \infty$ with probability 1.

Before presenting the main results on the iteration stability of M-SPP, we first recall the Robbins-Siegmund nonnegative almost-supermartingale convergence lemma, which is typically used for establishing the stability and convergence of stochastic optimization methods including SPP (Asi and Duchi, 2019b).

Lemma 5 (Robbins and Siegmund (1971)). Consider four sequences of nonnegative random variables $\{U_t\}$, $\{V_t\}$, $\{\alpha_t\}$, $\{\beta_t\}$ that are measurable with respect to a filtration $\{\mathcal F_t\}_{t\ge0}$. Suppose that $\sum_t \alpha_t < \infty$, $\sum_t \beta_t < \infty$, and
\[
E[U_{t+1} \mid \mathcal F_t] \le (1+\alpha_t) U_t + \beta_t - V_t.
\]
Then there exists $U_\infty$ such that $U_t \xrightarrow{a.s.} U_\infty$ and $\sum_t V_t < \infty$ with probability 1.

The following proposition shows that the sequence of estimation errors $\{\|w_t - w^*\|\}$ is non-divergent in expectation, and that it converges to some finite value and is bounded with probability 1.

Proposition 2. Suppose that Assumption 1 holds. Assume that $\gamma_t \ge \frac{16L}{n}$ and $\sum_{t\ge1} L\gamma_t^{-2} < \infty$. Then the following hold: (a) $E[D(w_t, W^*)] < \infty$; (b) $D(w_t, W^*)$ converges to some finite value and $\sup_{t\ge1} D(w_t, W^*) < \infty$ with probability 1.

Proof. Applying Lemma 4 yields that for all $t \ge 1$,
\[
E[D^2(w_t, W^*)] \lesssim D^2(w_0, W^*) + \sum_{\tau=1}^t \frac{L}{\gamma_\tau^2 n} R^* < \infty,
\]
where we have used the given conditions on $\gamma_t$. This proves part (a).
To show part (b), invoking Lemma 5 with $\alpha_t = V_t \equiv 0$ and $\beta_t = \frac{16L}{\gamma_t^2 n} R^*$ on the recursion (10) yields that $D(w_t, W^*)$ converges to some finite value, and thus $\sup_{t\ge1} D(w_t, W^*) < \infty$ almost surely.

Remark 12. Proposition 2 shows that, in contrast to minibatch SGD, the choice of $\gamma_t$ in M-SPP is insensitive to the gradient scale of the loss functions for generating a non-divergent sequence of estimation errors.

B Proofs for the Results in Section 3

In this section, we present the technical proofs for the main results stated in Section 3.

B.1 Proof of Theorem 4

In this subsection, we prove Theorem 4, which is restated below.

Theorem 4. Suppose that Assumptions 1, 2 and 3 hold. Let $\rho \in (0, 1/4]$ be an arbitrary scalar and set $\gamma_t = \frac{\lambda\rho t}{4}$. Suppose that $n \ge \frac{76L}{\lambda\rho}$ and assume that $\epsilon_t \le \frac{\epsilon}{n t^4}$ for some $\epsilon \in [0, 1]$. Then for any $T \ge 1$, the weighted average output $\bar w_T = \frac{2}{T(T+1)}\sum_{t=1}^T t\,w_t$ of Algorithm 1 satisfies
\[
E[R(\bar w_T) - R^*] \lesssim \frac{\rho}{T^2}\,(R(w_0) - R^*) + \frac{L}{\lambda\rho n T} R^* + \frac{\sqrt{\epsilon}}{T^2}\Big(\frac{L}{\lambda\rho} + G\sqrt{\frac{1}{\lambda\rho}}\Big).
\]
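Theorem 4 couples the modulus schedule $\gamma_t = \lambda\rho t/4$ with an inner-solver tolerance schedule $\epsilon_t \le \epsilon/(n t^4)$ that decays fast enough for the extra $\sqrt{\epsilon}/T^2$ term to stay lower order. A short sketch of producing the per-iteration pair (placeholder constants; the helper name is hypothetical):

```python
def inexact_mspp_schedule(t, lam, rho, n, eps=1.0):
    """Return (gamma_t, eps_t) as assumed in Theorem 4: gamma_t = lam*rho*t/4, eps_t = eps/(n*t^4)."""
    gamma_t = lam * rho * t / 4.0
    eps_t = eps / (n * t ** 4)
    return gamma_t, eps_t

lam, rho, n = 1.0, 0.25, 256     # placeholder constants
for t in (1, 2, 5, 10):
    print(t, inexact_mspp_schedule(t, lam, rho, n))
```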
Preliminaries. In what follows, we denote by $\tilde w_t := \arg\min_{w\in W} F_t(w)$ the exact solution of the inner-loop minibatch ERM optimization, which plays the same role as $w_t$ in Section 2. We first present the following lemma, which upper bounds the discrepancy between the inexact minimizer $w_t$ and the exact minimizer $\tilde w_t$.

Lemma 6. Assume that the loss function $\ell$ is convex with respect to its first argument and that $r$ is convex. Then we have
\[
\|w_t - \tilde w_t\| \le \sqrt{\frac{2\epsilon_t}{\gamma_t}}.
\]
Proof. Using arguments identical to those of Lemma 1 we can show that for all $w \in W$,
\[
R_{S_t}(\tilde w_t) - R_{S_t}(w) \le \frac{\gamma_t}{2}\big(\|w - w_{t-1}\|^2 - \|w - \tilde w_t\|^2 - \|\tilde w_t - w_{t-1}\|^2\big). \quad (15)
\]
Setting $w = w_t$ in the above yields
\[
\frac{\gamma_t}{2}\|w_t - \tilde w_t\|^2 \le F_t(w_t) - F_t(\tilde w_t) \le \epsilon_t,
\]
which directly implies $\|w_t - \tilde w_t\| \le \sqrt{2\epsilon_t/\gamma_t}$. This proves the desired bound.
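Lemma 6 is essentially the $\gamma_t$-strong convexity of $F_t$: an $\epsilon_t$-suboptimal inner solution can be at most $\sqrt{2\epsilon_t/\gamma_t}$ away from the exact minimizer. The sketch below checks this numerically by solving a least-squares proximal subproblem exactly in closed form and approximately by a few gradient steps (the data, step size, and step count are made up for illustration).

```python
import numpy as np

rng = np.random.default_rng(1)
n, d, gamma = 32, 4, 2.0
X = rng.normal(size=(n, d)); y = rng.normal(size=n); w_prev = np.zeros(d)

def F(w):      # F_t(w) = (1/n) sum_i 0.5*(x_i @ w - y_i)^2 + (gamma/2) ||w - w_prev||^2
    return 0.5 * np.mean((X @ w - y) ** 2) + 0.5 * gamma * np.sum((w - w_prev) ** 2)

def grad_F(w):
    return X.T @ (X @ w - y) / n + gamma * (w - w_prev)

w_exact = np.linalg.solve(X.T @ X / n + gamma * np.eye(d), X.T @ y / n + gamma * w_prev)

w_approx = w_prev.copy()
for _ in range(20):                      # a few gradient steps play the role of an inexact inner solver
    w_approx -= 0.05 * grad_F(w_approx)

eps_t = F(w_approx) - F(w_exact)         # realized suboptimality epsilon_t
lhs = np.linalg.norm(w_approx - w_exact)
rhs = np.sqrt(2 * eps_t / gamma)
print(lhs, rhs, bool(lhs <= rhs + 1e-12))   # the Lemma 6 inequality holds
```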
The following lemma is an extension of Lemma 3 to the inexact setting.

Lemma 7. Suppose that Assumptions 1, 2 and 3 hold, and assume that $\gamma_t \ge \frac{19L}{n}$. Then the following bound holds for any $\rho \in (0, 1)$:
\[
E[R(w_t) - R^* \mid \mathcal F_{t-1}]
\le \gamma_t\Big(D^2(w_{t-1}, W^*) - E\Big[\Big(1 - \frac{\rho\lambda}{2\gamma_t}\Big) D^2(w_t, W^*) \mid \mathcal F_{t-1}\Big]\Big) + \frac{19L}{\gamma_t n} R^* + \Big(3n + \frac{4\gamma_t}{\rho\lambda}\Big)\epsilon_t + 3G\sqrt{\frac{2\epsilon_t}{\gamma_t}}.
\]
Alternatively, for any $w^* \in W^*$, under Assumptions 1 and 3 we have
\[
E[R(w_t) - R^* \mid \mathcal F_{t-1}]
\le \gamma_t\big(\|w_{t-1} - w^*\|^2 - E[\|w_t - w^*\|^2 \mid \mathcal F_{t-1}]\big) + \frac{19L}{\gamma_t n} R^* + 3n\epsilon_t + \Big(2\sqrt{2\gamma_t}\,E[\|w_t - w^*\| \mid \mathcal F_{t-1}] + 3G\sqrt{\frac{2}{\gamma_t}}\Big)\sqrt{\epsilon_t}.
\]
Proof. Let us decompose $E[R(w_t) - R^* \mid \mathcal F_{t-1}]$ into the following three terms:
\[
E[R(w_t) - R^* \mid \mathcal F_{t-1}]
= \underbrace{E[R(w_t) - R(\tilde w_t) \mid \mathcal F_{t-1}]}_{A} + \underbrace{E[R(\tilde w_t) - R_{S_t}(\tilde w_t) \mid \mathcal F_{t-1}]}_{B} + \underbrace{E[R_{S_t}(\tilde w_t) - R^* \mid \mathcal F_{t-1}]}_{C}.
\]
We next bound these three terms respectively.

To bound the term $A$, we can show that
\[
|A| := \big|E[R(w_t) - R(\tilde w_t) \mid \mathcal F_{t-1}]\big|
= \big|E[R^\ell(w_t) - R^\ell(\tilde w_t) \mid \mathcal F_{t-1}] + E[r(w_t) - r(\tilde w_t) \mid \mathcal F_{t-1}]\big|
\le E\big[E_z|\ell(w_t; z) - \ell(\tilde w_t; z)| \mid \mathcal F_{t-1}\big] + E\big[|r(w_t) - r(\tilde w_t)| \mid \mathcal F_{t-1}\big]
\overset{\zeta_1}{\le} E\big[E_z\big[\sqrt{2L\,\ell(w_t; z)}\,\|w_t - \tilde w_t\|\big] \mid \mathcal F_{t-1}\big] + E\big[G\|w_t - \tilde w_t\| \mid \mathcal F_{t-1}\big]
\le E\Big[E_z\Big[\frac{L}{\gamma_t n}\ell(w_t; z) + \frac{\gamma_t n}{2}\|w_t - \tilde w_t\|^2\Big] \mid \mathcal F_{t-1}\Big] + E\big[G\|w_t - \tilde w_t\| \mid \mathcal F_{t-1}\big]
= E\Big[\frac{L}{\gamma_t n} R^\ell(w_t) \mid \mathcal F_{t-1}\Big] + E_{S_t}\Big[\frac{\gamma_t n}{2}\|w_t - \tilde w_t\|^2 + G\|w_t - \tilde w_t\| \mid \mathcal F_{t-1}\Big]
\le E\Big[\frac{L}{\gamma_t n} R(w_t) \mid \mathcal F_{t-1}\Big] + n\epsilon_t + G\sqrt{\frac{2\epsilon_t}{\gamma_t}},
\]
where in $\zeta_1$ we have used the convexity of the loss, Lemma 2 and Assumption 3, and in the last inequality we have used $r \ge 0$ and the perturbation bound of Lemma 6.

To bound the term $B$, using essentially the same proof arguments as for Lemma 3 we can show that
\[
B := E[R(\tilde w_t) - R_{S_t}(\tilde w_t) \mid \mathcal F_{t-1}]
\le \frac{8L}{\gamma_t n} E[R(\tilde w_t) \mid \mathcal F_{t-1}]
= \frac{8L}{\gamma_t n} E[R(\tilde w_t) - R(w_t) \mid \mathcal F_{t-1}] + \frac{8L}{\gamma_t n} E[R(w_t) \mid \mathcal F_{t-1}]
\le \frac12 |A| + \frac{8L}{\gamma_t n} E[R(w_t) \mid \mathcal F_{t-1}],
\]
where we have used the condition on the minibatch size and $\gamma_t$.

To bound the term $C$, based on the definition of $\tilde w_t$ and by invoking Lemma 1 with $w = w^*_{t-1}$ we can verify that
\[
C := E[R_{S_t}(\tilde w_t) - R^* \mid \mathcal F_{t-1}]
\le \frac{\gamma_t}{2} E\big[\|w^*_{t-1} - w_{t-1}\|^2 - \|w^*_{t-1} - \tilde w_t\|^2 - \|\tilde w_t - w_{t-1}\|^2 \mid \mathcal F_{t-1}\big]
\le \frac{\gamma_t}{2} E\big[\|w^*_{t-1} - w_{t-1}\|^2 - \|w^*_{t-1} - \tilde w_t\|^2 \mid \mathcal F_{t-1}\big]
= \frac{\gamma_t}{2}\big(\|w^*_{t-1} - w_{t-1}\|^2 - E[\|w^*_{t-1} - w_t + w_t - \tilde w_t\|^2 \mid \mathcal F_{t-1}]\big)
= \frac{\gamma_t}{2}\big(\|w^*_{t-1} - w_{t-1}\|^2 - E[\|w^*_{t-1} - w_t\|^2 + 2\langle w^*_{t-1} - w_t,\, w_t - \tilde w_t\rangle + \|w_t - \tilde w_t\|^2 \mid \mathcal F_{t-1}]\big)
\le \frac{\gamma_t}{2}\Big(\|w^*_{t-1} - w_{t-1}\|^2 - E\Big[\Big(1 - \frac{\rho\lambda}{2\gamma_t}\Big)\|w^*_{t-1} - w_t\|^2 - \frac{2\gamma_t}{\rho\lambda}\|w_t - \tilde w_t\|^2 \mid \mathcal F_{t-1}\Big]\Big)
\le \frac{\gamma_t}{2}\Big(\|w^*_{t-1} - w_{t-1}\|^2 - E\Big[\Big(1 - \frac{\rho\lambda}{2\gamma_t}\Big)\|w^*_t - w_t\|^2 \mid \mathcal F_{t-1}\Big]\Big) + \frac{2\gamma_t\epsilon_t}{\rho\lambda}
= \frac{\gamma_t}{2}\Big(D^2(w_{t-1}, W^*) - E\Big[\Big(1 - \frac{\rho\lambda}{2\gamma_t}\Big) D^2(w_t, W^*) \mid \mathcal F_{t-1}\Big]\Big) + \frac{2\gamma_t\epsilon_t}{\rho\lambda}.
\]
Combining the above three bounds yields
\[
E[R(w_t) - R^* \mid \mathcal F_{t-1}] = A + B + C
\le \frac32 |A| + \frac{8L}{\gamma_t n} E[R(w_t) \mid \mathcal F_{t-1}] + \frac{\gamma_t}{2}\Big(D^2(w_{t-1}, W^*) - E\Big[\Big(1 - \frac{\rho\lambda}{2\gamma_t}\Big) D^2(w_t, W^*) \mid \mathcal F_{t-1}\Big]\Big) + \frac{2\gamma_t\epsilon_t}{\rho\lambda}
\le E\Big[\frac{3L}{2\gamma_t n} R(w_t) \mid \mathcal F_{t-1}\Big] + \frac{3n}{2}\epsilon_t + \frac{3G}{2}\sqrt{\frac{2\epsilon_t}{\gamma_t}} + \frac{8L}{\gamma_t n} E[R(w_t) \mid \mathcal F_{t-1}] + \frac{\gamma_t}{2}\Big(D^2(w_{t-1}, W^*) - E\Big[\Big(1 - \frac{\rho\lambda}{2\gamma_t}\Big) D^2(w_t, W^*) \mid \mathcal F_{t-1}\Big]\Big) + \frac{2\gamma_t\epsilon_t}{\rho\lambda}
\le \frac{\gamma_t}{2}\Big(D^2(w_{t-1}, W^*) - E\Big[\Big(1 - \frac{\rho\lambda}{2\gamma_t}\Big) D^2(w_t, W^*) \mid \mathcal F_{t-1}\Big]\Big) + \frac{9.5L}{\gamma_t n} E[R(w_t) \mid \mathcal F_{t-1}] + \Big(\frac{3n}{2} + \frac{2\gamma_t}{\rho\lambda}\Big)\epsilon_t + \frac{3G}{2}\sqrt{\frac{2\epsilon_t}{\gamma_t}}
= \frac{\gamma_t}{2}\Big(D^2(w_{t-1}, W^*) - E\Big[\Big(1 - \frac{\rho\lambda}{2\gamma_t}\Big) D^2(w_t, W^*) \mid \mathcal F_{t-1}\Big]\Big) + \frac{9.5L}{\gamma_t n} R^* + \frac{9.5L}{\gamma_t n} E[R(w_t) - R^* \mid \mathcal F_{t-1}] + \Big(\frac{3n}{2} + \frac{2\gamma_t}{\rho\lambda}\Big)\epsilon_t + \frac{3G}{2}\sqrt{\frac{2\epsilon_t}{\gamma_t}}
\le \frac{\gamma_t}{2}\Big(D^2(w_{t-1}, W^*) - E\Big[\Big(1 - \frac{\rho\lambda}{2\gamma_t}\Big) D^2(w_t, W^*) \mid \mathcal F_{t-1}\Big]\Big) + \frac{9.5L}{\gamma_t n} R^* + \frac12 E[R(w_t) - R^* \mid \mathcal F_{t-1}] + \Big(\frac{3n}{2} + \frac{2\gamma_t}{\rho\lambda}\Big)\epsilon_t + \frac{3G}{2}\sqrt{\frac{2\epsilon_t}{\gamma_t}},
\]
where in the last inequality we have used the condition $\gamma_t \ge \frac{19L}{n}$. After rearranging the terms in the above inequality we obtain the first desired bound.

To derive the second bound, for any fixed $w^* \in W^*$ we note that the term $C$ can be alternatively bounded as
\[
C \le \frac{\gamma_t}{2}\big(\|w^* - w_{t-1}\|^2 - E[\|w^* - w_t\|^2 + 2\langle w^* - w_t,\, w_t - \tilde w_t\rangle + \|w_t - \tilde w_t\|^2 \mid \mathcal F_{t-1}]\big)
\le \frac{\gamma_t}{2}\big(\|w^* - w_{t-1}\|^2 - E[\|w^* - w_t\|^2 - 2\|w_t - w^*\|\,\|w_t - \tilde w_t\| \mid \mathcal F_{t-1}]\big)
\le \frac{\gamma_t}{2}\big(\|w^* - w_{t-1}\|^2 - E[\|w^* - w_t\|^2 \mid \mathcal F_{t-1}]\big) + \sqrt{2\gamma_t\epsilon_t}\,E[\|w^* - w_t\| \mid \mathcal F_{t-1}].
\]
Similar to the proof of the first bound, we can derive that
\[
E_{S_t}[R(w_t) - R^* \mid \mathcal F_{t-1}] = A + B + C
\le \frac32|A| + \frac{8L}{\gamma_t n} E[R(w_t) \mid \mathcal F_{t-1}] + \frac{\gamma_t}{2}\big(\|w^* - w_{t-1}\|^2 - E[\|w^* - w_t\|^2 \mid \mathcal F_{t-1}]\big) + \sqrt{2\gamma_t\epsilon_t}\,E[\|w^* - w_t\| \mid \mathcal F_{t-1}]
\le \frac{\gamma_t}{2}\big(\|w^* - w_{t-1}\|^2 - E[\|w^* - w_t\|^2 \mid \mathcal F_{t-1}]\big) + \frac{9.5L}{\gamma_t n} R^* + \frac12 E[R(w_t) - R^* \mid \mathcal F_{t-1}] + \frac{3n}{2}\epsilon_t + \sqrt{2\gamma_t\epsilon_t}\,E[\|w^* - w_t\| \mid \mathcal F_{t-1}] + \frac{3G}{2}\sqrt{\frac{2\epsilon_t}{\gamma_t}}.
\]
After rearranging the terms in the above inequality we obtain the second desired bound.

With the above preliminary results in hand, we are now in a position to prove the main result of Theorem 4.

Proof of Theorem 4.
Since by assumption $R(w_t) - R^* \ge \frac{\lambda}{2} D^2(w_t, W^*)$ and $\gamma_t = \frac{\lambda\rho t}{4} \ge \frac{\lambda\rho}{4} \ge \frac{19L}{n}$, based on the first bound in Lemma 7 we can show that
\[
(1 - 2\rho)\,E[R(w_t) - R^* \mid \mathcal F_{t-1}]
\le \gamma_t D^2(w_{t-1}, W^*) - \Big(\gamma_t + \frac{\rho\lambda}{2}\Big) E[D^2(w_t, W^*) \mid \mathcal F_{t-1}] + \frac{19L}{\gamma_t n} R^* + \Big(3n + \frac{4\gamma_t}{\rho\lambda}\Big)\epsilon_t + 3G\sqrt{\frac{2\epsilon_t}{\gamma_t}}
\le \frac{\lambda\rho t}{4} D^2(w_{t-1}, W^*) - \frac{\rho\lambda(t+2)}{4} E[D^2(w_t, W^*) \mid \mathcal F_{t-1}] + \frac{76L}{\lambda\rho n t} R^* + (3n + t)\,\epsilon_t + 6G\sqrt{\frac{2\epsilon_t}{\lambda\rho t}}.
\]
Now suppose that $\epsilon_t \le \frac{\epsilon}{n t^4}$ for some $\epsilon \in [0, 1]$. Since $\rho \le 1/4$, the above implies
\[
E[R(w_t) - R^* \mid \mathcal F_{t-1}]
\le \frac{\lambda\rho t}{2} D^2(w_{t-1}, W^*) - \frac{\rho\lambda(t+2)}{2} E[D^2(w_t, W^*) \mid \mathcal F_{t-1}] + \frac{152L}{\lambda\rho n t} R^* + \Big(\frac{6}{t^4} + \frac{2}{t^3} + 12G\sqrt{\frac{2}{\lambda\rho t^5}}\Big)\sqrt{\epsilon}.
\]
The above inequality then implies
\[
t\,E[R(w_t) - R^* \mid \mathcal F_{t-1}] \le (t+1)\,E[R(w_t) - R^* \mid \mathcal F_{t-1}]
\le \frac{\lambda\rho t(t+1)}{2} D^2(w_{t-1}, W^*) - \frac{\lambda\rho(t+1)(t+2)}{2} E[D^2(w_t, W^*) \mid \mathcal F_{t-1}] + \frac{304L}{\lambda\rho n} R^* + \Big(\frac{12}{t^3} + \frac{4}{t^2} + \frac{24G}{t}\sqrt{\frac{2}{\lambda\rho t}}\Big)\sqrt{\epsilon},
\]
where we have used the fact $\frac{t+1}{t} \le 2$ for $t \ge 1$. In view of the law of total expectation, summing the above inequality over $t = 1,\dots,T$ with natural normalization yields
\[
\frac{2}{T(T+1)}\sum_{t=1}^T t\,E[R(w_t) - R^*]
\le \frac{2\lambda\rho}{T(T+1)} D^2(w_0, W^*) + \frac{608L}{\lambda\rho(T+1)n} R^* + \frac{\sqrt{\epsilon}}{T(T+1)}\Big(64 + 192G\sqrt{\frac{2}{\lambda\rho}}\Big)
\le \frac{4\rho}{T(T+1)}(R(w_0) - R^*) + \frac{608L}{\lambda\rho(T+1)n} R^* + \frac{\sqrt{\epsilon}}{T(T+1)}\Big(64 + 192G\sqrt{\frac{2}{\lambda\rho}}\Big),
\]
which then immediately leads to the desired bound. The proof is concluded.

B.2 Proof of Theorem 5

In this subsection, we prove Theorem 5, which is restated below.

Theorem 5. Suppose that Assumptions 1 and 3 hold. Set $\gamma_t \equiv \gamma \ge \frac{19L}{n}$ and assume that $\epsilon_t \le \min\big\{\frac{\epsilon}{n^2 t^5},\, \frac{2G^2}{9n^2\gamma}\big\}$ for some $\epsilon \in [0, 1]$.
Then the average output $\bar w_T = \frac1T\sum_{t=1}^T w_t$ of Algorithm 1 satisfies
\[
E[R(\bar w_T) - R^*] \lesssim \frac{\gamma}{T} D^2(w_0, W^*) + \frac{L}{\gamma n} R^* + \Big(\frac{L}{\gamma n} + \frac{\gamma}{L n T} + \frac{G}{\sqrt{\gamma}\,n T}\Big)\sqrt{\epsilon}.
\]
In particular, for $\gamma = \sqrt{\frac{T}{n}} + \frac{19L}{n}$ it holds that
\[
E[R(\bar w_T) - R^*] \lesssim \Big(\frac{1}{\sqrt{nT}} + \frac{L}{nT}\Big) D^2(w_0, W^*) + \frac{L}{\sqrt{nT}} R^* + \Big(\frac{L + G}{\sqrt{nT}} + \frac{1}{nT}\Big)\sqrt{\epsilon}.
\]
The following lemma, which can be proved by induction (see, e.g., Schmidt et al., 2011), will be used to prove the main result.

Lemma 8. Assume that the nonnegative sequence $\{u_\tau\}_{\tau\ge1}$ satisfies the following recursion for all $t \ge 1$:
\[
u_t^2 \le S_t + \sum_{\tau=1}^t \alpha_\tau u_\tau,
\]
with $\{S_\tau\}_{\tau\ge1}$ an increasing sequence, $S_0 \ge u_0^2$ and $\alpha_\tau \ge 0$ for all $\tau$. Then the following bound holds for all $t \ge 1$:
\[
u_t \le \sqrt{S_t} + \sum_{\tau=1}^t \alpha_\tau.
\]
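Lemma 8 can also be sanity-checked numerically: build any nonnegative sequence that satisfies the hypothesis $u_t^2 \le S_t + \sum_{\tau\le t}\alpha_\tau u_\tau$ and verify the claimed conclusion $u_t \le \sqrt{S_t} + \sum_{\tau\le t}\alpha_\tau$. The construction below (random increasing $S_t$, random $\alpha_\tau$, and $u_t$ taken slightly below the admissible maximum) is our own illustration, not part of the paper.

```python
import numpy as np

rng = np.random.default_rng(2)
T = 200
alpha = rng.uniform(0.0, 0.5, size=T)                 # nonnegative alpha_tau
S = np.cumsum(rng.uniform(0.1, 1.0, size=T))          # increasing S_t > 0

u = np.zeros(T)
for t in range(T):
    # u_t^2 = 0.9*(S_t + sum_{tau<t} alpha_tau*u_tau) <= S_t + sum_{tau<=t} alpha_tau*u_tau,
    # so the hypothesis of Lemma 8 holds by construction.
    u[t] = np.sqrt(0.9 * (S[t] + np.dot(alpha[:t], u[:t])))

conclusion = u <= np.sqrt(S) + np.cumsum(alpha)       # u_t <= sqrt(S_t) + sum_{tau<=t} alpha_tau
print(bool(conclusion.all()))                         # prints True
```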
The following lemma gives an upper bound on the expected estimation error $E[\|w^*_0 - w_t\|]$.

Lemma 9. Under the conditions of Theorem 5, the following bound holds for all $t \ge 1$:
\[
E[\|w_t - w^*_0\|] \le \|w_0 - w^*_0\| + \sqrt{\frac{t}{\gamma} R^*} + \frac{6tG}{\gamma}.
\]
Proof. Recall that $w^*_0 = \arg\min_{w\in W^*}\|w_0 - w\|$. Since $\gamma_t \equiv \gamma \ge \frac{19L}{n}$, the second bound in Lemma 7 is valid. For any $t \in [T]$, by summing that inequality with $w^* = w^*_0$ over $\tau = 1,\dots,t$ we obtain
\[
\sum_{\tau=1}^t E[R(w_\tau) - R^*] + \gamma\,E\big[\|w_t - w^*_0\|^2\big]
\le \gamma\|w_0 - w^*_0\|^2 + \frac{19L}{\gamma n}\,t R^* + 3n\sum_{\tau=1}^t \epsilon_\tau + \sum_{\tau=1}^t\Big(2\sqrt{2\gamma}\,E[\|w^*_0 - w_\tau\|] + 3G\sqrt{\frac{2}{\gamma}}\Big)\sqrt{\epsilon_\tau}. \quad (16)
\]
Dropping the nonnegative term $\sum_{\tau=1}^t E_{S_{[\tau]}}[R(w_\tau) - R^*]$ from the above inequality and dividing by $\gamma$ yields
\[
\underbrace{E\big[\|w_t - w^*_0\|^2\big]}_{u_t^2}
\le \|w_0 - w^*_0\|^2 + \frac{19L}{\gamma^2 n}\,t R^* + \frac{3n}{\gamma}\sum_{\tau=1}^t \epsilon_\tau + \sum_{\tau=1}^t\Big(2\sqrt{\frac{2}{\gamma}}\,E[\|w^*_0 - w_\tau\|] + \frac{3G\sqrt{2}}{\gamma\sqrt{\gamma}}\Big)\sqrt{\epsilon_\tau}
\overset{\zeta_1}{\le} \|w_0 - w^*_0\|^2 + \frac{t}{\gamma} R^* + \sum_{\tau=1}^t\Big(\frac{3n}{\gamma}\epsilon_\tau + \frac{3G\sqrt{2}}{\gamma\sqrt{\gamma}}\sqrt{\epsilon_\tau}\Big) + \sum_{\tau=1}^t\Big(2\sqrt{\frac{2\epsilon_\tau}{\gamma}}\Big)\sqrt{E[\|w^*_0 - w_\tau\|^2]}
\le \underbrace{\|w_0 - w^*_0\|^2 + \frac{t}{\gamma} R^* + \sum_{\tau=1}^t \frac{4G\sqrt{2\epsilon_\tau}}{\gamma\sqrt{\gamma}}}_{S_t}
+ \sum_{\tau=1}^t \underbrace{\Big(2\sqrt{\frac{2\epsilon_\tau}{\gamma}}\Big)}_{\alpha_\tau}\,\underbrace{\sqrt{E[\|w^*_0 - w_\tau\|^2]}}_{u_\tau},
\]
where in $\zeta_1$ we have used $\gamma \ge \frac{19L}{n}$ and the basic inequality $E^2[X] \le$
E[X2],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' and in the last inequality we have used the condition ǫτ ≤ 2G2 9n2γ for all τ ≥ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' By invoking Lemma 8 to the above recursion form we can derive that for all t ≥ 1, � E [∥wt − w∗ 0∥2] ≤ � � � �∥w0 − w∗ 0∥2 + t γ R∗ + t � τ=1 4G√2ǫτ γ + t � τ=1 2 �2ǫτ γ ≤∥w0 − w∗ 0∥ + � t γ R∗ + t � τ=1 � 4G√2ǫτ γ√γ + t � τ=1 2 � 2ǫτ γ ≤∥w0 − w∗ 0∥ + � t γ R∗ + 6Gt γ , where the last inequality is due to the condition ǫτ ≤ 2G2 9γ for all τ ≥ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' The above inequality then directly implies the desired bound for all t ∈ [T].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' 41 Now we are ready to prove the main result of Theorem 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Proof of Theorem 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Dropping non-negative term γE ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='∥wt − w∗∥2� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='in (16) followed by natural nor- ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='malization yields ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='T ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='T ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='t=1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='E [R(wt) − R∗] ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='≤ γ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='T ∥w0 − w∗ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='0∥2 + 19L ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='γn R∗ + 3n ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} 
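Lemma 8 is not restated in this appendix, but the conclusion used above is the standard resolution of a self-bounding recursion: if \(u_t^2 \le S_t + \sum_{\tau \le t}\alpha_\tau u_\tau\) with nonnegative \(\alpha_\tau\) and nondecreasing \(S_t \ge 0\), then \(u_t \le \sqrt{S_t} + \sum_{\tau \le t}\alpha_\tau\). The short Python check below (an editorial sanity check with synthetic sequences, not part of the original analysis) builds the extremal sequence that satisfies the recursion with equality at every step and verifies the claimed resolution.

    import math, random

    random.seed(0)
    for trial in range(1000):
        T = random.randint(1, 20)
        alpha = [random.uniform(0.0, 2.0) for _ in range(T)]      # nonnegative coefficients alpha_tau
        incs = [random.uniform(0.0, 3.0) for _ in range(T)]
        S = [sum(incs[:t + 1]) for t in range(T)]                 # nondecreasing S_t >= 0
        u, mix = [], 0.0                                          # mix = sum_{tau < t} alpha_tau * u_tau
        for t in range(T):
            a = alpha[t]
            # extremal u_t solving u^2 = S_t + mix + alpha_t * u (recursion tight at each step)
            u_t = 0.5 * (a + math.sqrt(a * a + 4.0 * (S[t] + mix)))
            u.append(u_t)
            mix += a * u_t
            # claimed resolution: u_t <= sqrt(S_t) + sum_{tau <= t} alpha_tau
            assert u_t <= math.sqrt(S[t]) + sum(alpha[:t + 1]) + 1e-9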
Now we are ready to prove the main result of Theorem 5.

Proof of Theorem 5. Dropping the non-negative term \(\gamma\,\mathbb{E}\left[\|w_t - w^*\|^2\right]\) in (16) followed by natural normalization yields
\[
\frac{1}{T}\sum_{t=1}^{T}\mathbb{E}\left[R(w_t) - R^*\right] \le \frac{\gamma}{T}\|w_0 - w^*_0\|^2 + \frac{19L}{\gamma n} R^* + \frac{3n}{T}\sum_{t=1}^{T}\epsilon_t + \frac{1}{T}\sum_{t=1}^{T}\left( 2\sqrt{2\gamma}\,\mathbb{E}\left[\|w_t - w^*_0\|\right] + 3G\sqrt{\frac{2}{\gamma}} \right)\sqrt{\epsilon_t}
\]
\[
\overset{\zeta_1}{\le} \frac{\gamma}{T}\|w_0 - w^*_0\|^2 + \frac{19L}{\gamma n} R^* + \frac{3n}{T}\sum_{t=1}^{T}\epsilon_t + \frac{1}{T}\sum_{t=1}^{T}\left( 2\sqrt{2}\left( \sqrt{\gamma}\|w_0 - w^*_0\| + \sqrt{t R^*} + \frac{6Gt}{\sqrt{\gamma}} \right) + 3G\sqrt{\frac{2}{\gamma}} \right)\sqrt{\epsilon_t}
\]
\[
\overset{\zeta_2}{\le} \frac{\gamma}{T}\|w_0 - w^*_0\|^2 + \frac{19L}{\gamma n} R^* + \frac{1}{T}\sum_{t=1}^{T}\left( 3n\epsilon_t + 2\sqrt{2\gamma\epsilon_t}\,\|w_0 - w^*_0\| + 2\sqrt{2tR^*\epsilon_t} + \frac{15\sqrt{2\epsilon_t}\,Gt}{\sqrt{\gamma}} \right)
\]
\[
\overset{\zeta_3}{\le} \frac{\gamma}{T}\|w_0 - w^*_0\|^2 + \frac{19L}{\gamma n} R^* + \frac{1}{T}\sum_{t=1}^{T}\left( 3n\epsilon_t + \frac{\gamma\|w_0 - w^*_0\|^2}{t^2} + 2t^2\epsilon_t + \frac{2LR^*}{\gamma n} + \frac{\gamma n t\epsilon_t}{L} + \frac{15\sqrt{2\epsilon_t}\,Gt}{\sqrt{\gamma}} \right)
\]
\[
\le \frac{3\gamma}{T}\|w_0 - w^*_0\|^2 + \frac{21L}{\gamma n} R^* + \frac{1}{T}\sum_{t=1}^{T}\left( 3n\epsilon_t + 2t^2\epsilon_t + \frac{\gamma n t\epsilon_t}{L} + \frac{15\sqrt{2\epsilon_t}\,Gt}{\sqrt{\gamma}} \right),
\]
where "\(\zeta_1\)" follows from Lemma 9, "\(\zeta_2\)" is due to \(t \ge 1\), and "\(\zeta_3\)" is due to \(ab \le (a^2 + b^2)/2\).
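Both applications of \(ab \le (a^2 + b^2)/2\) in step \(\zeta_3\) can be checked numerically with the pairings \(a = \sqrt{\gamma}\|w_0 - w^*_0\|/t,\ b = t\sqrt{2\epsilon_t}\) and \(a = \sqrt{2LR^*/(\gamma n)},\ b = \sqrt{\gamma n t\epsilon_t/L}\). The Python snippet below is an editorial sanity check on random positive placeholders, not values from the paper.

    import math, random

    random.seed(1)
    for _ in range(1000):
        gamma = random.uniform(0.1, 5.0)
        eps = random.uniform(0.0, 5.0)
        D = random.uniform(0.0, 5.0)
        L = random.uniform(0.1, 5.0)
        n = random.uniform(1.0, 50.0)
        R = random.uniform(0.0, 5.0)
        t = random.randint(1, 50)
        # first split: a = sqrt(gamma)*D/t, b = t*sqrt(2*eps), so 2ab = 2*sqrt(2*gamma*eps)*D
        assert 2 * math.sqrt(2 * gamma * eps) * D <= gamma * D ** 2 / t ** 2 + 2 * t ** 2 * eps + 1e-9
        # second split: a = sqrt(2*L*R/(gamma*n)), b = sqrt(gamma*n*t*eps/L), so 2ab = 2*sqrt(2*t*R*eps)
        assert 2 * math.sqrt(2 * t * R * eps) <= 2 * L * R / (gamma * n) + gamma * n * t * eps / L + 1e-9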
Now consider \(\epsilon_t \le \frac{\epsilon}{n^2 t^5}\) for some \(\epsilon \in [0, 1]\). Then it follows from the preceding inequality that
\[
\frac{1}{T}\sum_{t=1}^{T}\mathbb{E}\left[R(w_t) - R^*\right] \le \frac{3\gamma}{T}\|w_0 - w^*_0\|^2 + \frac{21L}{\gamma n} R^* + \frac{1}{T}\sum_{t=1}^{T}\left( \frac{3}{n t^5} + \frac{2}{n t^3} + \frac{\gamma}{n L t^4} + \frac{15\sqrt{2}\,G}{n t^{1.5}\sqrt{\gamma}} \right)\sqrt{\epsilon} \le \frac{3\gamma}{T}\|w_0 - w^*_0\|^2 + \frac{21L}{\gamma n} R^* + \frac{1}{T}\left( \frac{6}{n} + \frac{4}{n} + \frac{2\gamma}{n L} + \frac{45\sqrt{2}\,G}{n\sqrt{\gamma}} \right)\sqrt{\epsilon}.
\]
Let \(\bar{w}_T = \frac{1}{T}\sum_{t=1}^{T} w_t\). Combined with the convexity of \(R\), the above inequality implies
\[
\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \lesssim \frac{\gamma}{T} D^2(w_0, W^*) + \frac{L}{\gamma n} R^* + \left( \frac{1}{nT} + \frac{\gamma}{LnT} + \frac{G}{nT\sqrt{\gamma}} \right)\sqrt{\epsilon}.
\]
This proves the first bound. Substituting \(\gamma = \sqrt{\frac{T}{n}} + \frac{19L}{n} > \frac{19L}{n}\) into the above bound and preserving the leading terms yields the following second desired bound:
\[
\mathbb{E}\left[R(\bar{w}_T) - R^*\right] \lesssim \left( \frac{1}{\sqrt{nT}} + \frac{L}{nT} \right) D^2(w_0, W^*) + \frac{L}{\sqrt{nT}} R^* + \left( \frac{L + G}{\sqrt{nT}} + \frac{1}{nT} \right)\sqrt{\epsilon}.
\]
The proof is concluded.
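The collapse of the \(t\)-dependent series into the constants \(\frac{6}{n} + \frac{4}{n} + \frac{2\gamma}{nL} + \frac{45\sqrt{2}G}{n\sqrt{\gamma}}\) above only requires \(\sum_{t\ge 1} t^{-5} \le 2\), \(\sum_{t\ge 1} t^{-3} \le 2\), \(\sum_{t\ge 1} t^{-4} \le 2\) and \(\sum_{t\ge 1} t^{-1.5} \le 3\). The short Python check below (an editorial sanity check, not part of the original analysis) certifies these infinite-series bounds by completing each partial sum with an integral bound on its tail.

    # p-series bounds used to absorb the per-iteration factors into absolute constants
    N = 100000
    for p, bound in [(5.0, 2.0), (3.0, 2.0), (4.0, 2.0), (1.5, 3.0)]:
        partial = sum(t ** (-p) for t in range(1, N + 1))
        tail = N ** (1.0 - p) / (p - 1.0)   # integral upper bound on the remaining tail
        assert partial + tail <= bound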
C Proofs for the Results in Section 4

In this section, we present the proofs for the high probability estimation error bounds stated in Section 4.

C.1 Proof of Proposition 1

In this subsection, we prove Proposition 1, which is restated below.

Proposition 1. Suppose that Assumption 1 holds and the loss function is bounded such that \(0 \le \ell(y', y) \le M\) for all \(y, y'\). Let \(S = \{S_t\}_{t\in[T]}\) and \(S' = \{S'_t\}_{t\in[T]}\) be two sets of data minibatches satisfying \(S \doteq S'\). Then

(a) The weighted average outputs \(\bar{w}_T\) and \(\bar{w}'_T\) respectively generated by M-SPP (Algorithm 1) over \(S\) and \(S'\) satisfy
\[
\sup_{S, S'} \|\bar{w}_T - \bar{w}'_T\| \le \frac{4\sqrt{2LM}}{n\min_{t\in[T]}\gamma_t} + \sum_{t=1}^{T} 2\sqrt{\frac{2\epsilon_t}{\gamma_t}}.
\]

(b) The weighted average outputs \(\bar{w}_T\) and \(\bar{w}'_T\) respectively generated by M-SPP-SWoR (Algorithm 3) over \(S\) and \(S'\) satisfy
\[
\sup_{S, S'} \mathbb{E}_{\xi_{[T]}}\left[\|\bar{w}_T - \bar{w}'_T\|\right] \le \sum_{t=1}^{T}\left( \frac{4\sqrt{2LM}}{nT\gamma_t} + 2\sqrt{\frac{2\epsilon_t}{\gamma_t}} \right).
\]

We first need to show the following preliminary result, which concerns the expansion property of the M-SPP update when performed over identical or different minibatches.

Lemma 10. Suppose that Assumption 1 holds and the loss function \(\ell\) is bounded in the interval \([0, M]\). Starting from \(w_0 = w'_0\), define the sequences \(\{w_t\}_{t\in[T]}\) and \(\{w'_t\}_{t\in[T]}\) that are respectively generated over \(\{S_t\}_{t\in[T]}\) and \(\{S'_t\}_{t\in[T]}\) according to
\[
F_t(w_t) \le \min_{w\in W}\left\{ F_t(w) := R_{S_t}(w) + \frac{\gamma_t}{2}\|w - w_{t-1}\|^2 \right\} + \epsilon_t, \qquad F'_t(w'_t) \le \min_{w\in W}\left\{ F'_t(w) := R_{S'_t}(w) + \frac{\gamma_t}{2}\|w - w'_{t-1}\|^2 \right\} + \epsilon_t.
\]
Assume that either \(S_t = S'_t\) or \(S_t \doteq S'_t\) for all \(t \in [T]\). Let \(\beta_t = \mathbb{1}_{\{S_t \neq S'_t\}}\). Then the following bound holds for all \(t \in [T]\):
\[
\|w_t - w'_t\| \le \sum_{\tau=1}^{t}\left( \beta_\tau\,\frac{4\sqrt{2LM}}{n\gamma_\tau} + 2\sqrt{\frac{2\epsilon_\tau}{\gamma_\tau}} \right).
\]

Proof. Let \(w^*_t = \arg\min_{w} F_t(w)\) and \(w'^*_t = \arg\min_{w} F'_t(w)\). It follows from Lemma 1 that
\[
R_{S_t}(w^*_t) - R_{S_t}(w'^*_t) \le \frac{\gamma_t}{2}\left( \|w'^*_t - w_{t-1}\|^2 - \|w'^*_t - w^*_t\|^2 - \|w^*_t - w_{t-1}\|^2 \right), \qquad R_{S'_t}(w'^*_t) - R_{S'_t}(w^*_t) \le \frac{\gamma_t}{2}\left( \|w^*_t - w'_{t-1}\|^2 - \|w'^*_t - w^*_t\|^2 - \|w'^*_t - w'_{t-1}\|^2 \right).
\]
Summing both sides of the above two inequalities yields
\[
R_{S_t}(w^*_t) - R_{S_t}(w'^*_t) + R_{S'_t}(w'^*_t) - R_{S'_t}(w^*_t) \le \frac{\gamma_t}{2}\left( \|w'^*_t - w_{t-1}\|^2 - \|w^*_t - w_{t-1}\|^2 + \|w^*_t - w'_{t-1}\|^2 - \|w'^*_t - w'_{t-1}\|^2 - 2\|w'^*_t - w^*_t\|^2 \right) = \frac{\gamma_t}{2}\left( 2\langle w^*_t - w'^*_t,\, w_{t-1} - w'_{t-1}\rangle - 2\|w'^*_t - w^*_t\|^2 \right) \le \frac{\gamma_t}{2}\left( \|w_{t-1} - w'_{t-1}\|^2 - \|w^*_t - w'^*_t\|^2 \right).
\]
We need to distinguish the following two complementary cases.
Case I: \(S_t = S'_t\). In this case, the previous inequality immediately leads to \(\|w^*_t - w'^*_t\| \le \|w_{t-1} - w'_{t-1}\|\). By using the triangle inequality and Lemma 6 we obtain
\[
\|w_t - w'_t\| \le \|w_t - w^*_t\| + \|w^*_t - w'^*_t\| + \|w'_t - w'^*_t\| \le \|w_{t-1} - w'_{t-1}\| + 2\sqrt{\frac{2\epsilon_t}{\gamma_t}}. \tag{17}
\]

Case II: \(S_t\) and \(S'_t\) differ in a single element. In this case, we have
\[
\|w^*_t - w'^*_t\|^2 \le \|w_{t-1} - w'_{t-1}\|^2 + \frac{2}{\gamma_t}\left( R_{S_t}(w'^*_t) - R_{S_t}(w^*_t) + R_{S'_t}(w^*_t) - R_{S'_t}(w'^*_t) \right) = \|w_{t-1} - w'_{t-1}\|^2 + \frac{2}{\gamma_t}\left( R^{\ell}_{S_t}(w'^*_t) - R^{\ell}_{S_t}(w^*_t) + R^{\ell}_{S'_t}(w^*_t) - R^{\ell}_{S'_t}(w'^*_t) \right)
\]
\[
= \|w_{t-1} - w'_{t-1}\|^2 + \frac{2}{\gamma_t}\left( \frac{1}{|S_t|}\sum_{z\in S_t}\left( \ell(w'^*_t; z) - \ell(w^*_t; z) \right) + \frac{1}{|S'_t|}\sum_{z\in S'_t}\left( \ell(w^*_t; z) - \ell(w'^*_t; z) \right) \right) \le \|w_{t-1} - w'_{t-1}\|^2 + \frac{4\sqrt{2LM}}{n\gamma_t}\|w^*_t - w'^*_t\|,
\]
where in the last inequality we have used the facts that \(\ell(\cdot\,;\cdot)\) is \(\sqrt{2LM}\)-Lipschitz with respect to its first argument (as implied by Lemma 2) and that \(S_t\) and \(S'_t\) differ in a single element. Since \(x^2 \le y^2 + ax\) implies \(x \le y + a\) for all \(x, y, a > 0\), we can derive from the above that
\[
\|w^*_t - w'^*_t\| \le \|w_{t-1} - w'_{t-1}\| + \frac{4\sqrt{2LM}}{n\gamma_t}.
\]
Then, based on the triangle inequality and Lemma 6, we have
\[
\|w_t - w'_t\| \le \|w_t - w^*_t\| + \|w^*_t - w'^*_t\| + \|w'_t - w'^*_t\| \le \|w_{t-1} - w'_{t-1}\| + \frac{4\sqrt{2LM}}{n\gamma_t} + 2\sqrt{\frac{2\epsilon_t}{\gamma_t}}. \tag{18}
\]
Let \(\beta_t = \mathbb{1}_{\{S_t \neq S'_t\}}\), where \(\mathbb{1}_{\{C\}}\) is the indicator function of the condition \(C\). Based on the recursive forms (17) and (18) and the condition \(w_0 = w'_0\), we can show that for all \(t \in [T]\)
\[
\|w_t - w'_t\| \le \sum_{\tau=1}^{t}\left( \frac{4\beta_\tau\sqrt{2LM}}{n\gamma_\tau} + 2\sqrt{\frac{2\epsilon_\tau}{\gamma_\tau}} \right),
\]
which is the desired bound.
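For concreteness, the inexact update \(F_t(w_t) \le \min_w F_t(w) + \epsilon_t\) appearing in Lemma 10 can be instantiated as in the Python sketch below for a least-squares minibatch risk, where the regularized subproblem has a closed form and is therefore solved exactly (so the tolerance \(\epsilon_t\) is trivially met). The data generation, the constant step profile, and the function name mspp_step are illustrative assumptions of this sketch, not Algorithm 1 as specified in the main text.

    import numpy as np

    def mspp_step(X, y, w_prev, gamma):
        # argmin_w (1/(2m)) * ||X w - y||^2 + (gamma/2) * ||w - w_prev||^2, solved in closed form
        m, d = X.shape
        A = X.T @ X / m + gamma * np.eye(d)
        b = X.T @ y / m + gamma * w_prev
        return np.linalg.solve(A, b)

    rng = np.random.default_rng(0)
    d, n, T = 5, 32, 10                    # dimension, minibatch size, number of minibatches (illustrative)
    w_star = rng.normal(size=d)            # planted parameter for the synthetic regression data
    w, w_bar = np.zeros(d), np.zeros(d)
    for t in range(1, T + 1):
        X = rng.normal(size=(n, d))
        y = X @ w_star + 0.1 * rng.normal(size=n)
        gamma_t = np.sqrt(T / n)           # constant step profile of the type used in Theorem 7 (illustrative)
        w = mspp_step(X, y, w, gamma_t)
        w_bar += w / T                     # plain average of the iterates
    print(np.linalg.norm(w_bar - w_star))  # distance to the planted parameter shrinks with more minibatches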
Now we are in a position to prove the main result in Proposition 1.

Proof of Proposition 1. Consider a fixed pair of minibatch sets \(S \doteq S'\).

Part (a): Let \(\{w_t\}_{t\in[T]}\) and \(\{w'_t\}_{t\in[T]}\) be two solution sequences that are respectively generated over \(\{S_t\}_{t\in[T]}\) and \(\{S'_t\}_{t\in[T]}\) by Algorithm 1. At each time instance \(t\), define the random variable \(\beta_t := \mathbb{1}_{\{S_t \neq S'_t\}}\). Since by assumption \(S\) and \(S'\) differ only in a single minibatch, there must exist one and only one \(t \in [T]\) such that \(\beta_t = 1\) and \(\beta_j = 0\) for all \(j \in [T]\), \(j \neq t\). Then, in the worst case of \(\beta_\tau = 1\) for \(\tau = \arg\min_{i\in[t]}\gamma_i\), it follows from Lemma 10 that for all \(t \in [T]\),
\[
\|w_t - w'_t\| \le \frac{4\sqrt{2LM}}{n\min_{i\in[t]}\gamma_i} + \sum_{i=1}^{t} 2\sqrt{\frac{2\epsilon_i}{\gamma_i}} \le \frac{4\sqrt{2LM}}{n\min_{i\in[T]}\gamma_i} + \sum_{i=1}^{T} 2\sqrt{\frac{2\epsilon_i}{\gamma_i}}.
\]
Then the convex combination nature of \(\bar{w}_T\) and \(\bar{w}'_T\) implies that
\[
\|\bar{w}_T - \bar{w}'_T\| \le \frac{\sum_{t}\gamma_t\|w_t - w'_t\|}{\sum_{t}\gamma_t} \le \frac{4\sqrt{2LM}}{n\min_{t\in[T]}\gamma_t} + \sum_{t=1}^{T} 2\sqrt{\frac{2\epsilon_t}{\gamma_t}}.
\]
The desired result follows immediately as the above bound holds for any pair \(\{S, S'\}\).
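The final step of Part (a) only uses that the \(\gamma_t\)-weighted average is a convex combination, so the triangle inequality transfers the per-iterate bound to the averaged outputs. The brief Python check below illustrates this with synthetic iterates; the dimensions, weights, and perturbations are arbitrary placeholders.

    import numpy as np

    rng = np.random.default_rng(2)
    T, d = 20, 7
    gamma = rng.uniform(0.5, 2.0, size=T)                 # positive weights (synthetic)
    W = rng.normal(size=(T, d))                           # iterates w_t
    Wp = W + rng.normal(scale=0.1, size=(T, d))           # perturbed iterates w'_t
    w_bar = (gamma[:, None] * W).sum(axis=0) / gamma.sum()
    w_barp = (gamma[:, None] * Wp).sum(axis=0) / gamma.sum()
    lhs = np.linalg.norm(w_bar - w_barp)
    rhs = (gamma * np.linalg.norm(W - Wp, axis=1)).sum() / gamma.sum()
    assert lhs <= rhs + 1e-12                             # triangle inequality for convex combinations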
Part (b): Recall that \(\{\xi_t\}_{t\in[T]}\) are the uniform random indices for iteratively selecting data minibatches from \(S\) and \(S'\). Let \(\{w_t\}_{t\in[T]}\) and \(\{w'_t\}_{t\in[T]}\) be two solution sequences that are respectively generated over \(\{S_{\xi_t}\}_{t\in[T]}\) and \(\{S'_{\xi_t}\}_{t\in[T]}\) by Algorithm 3. Define the random variable \(\beta_t := \mathbb{1}_{\{S_{\xi_t} \neq S'_{\xi_t}\}}\). Since by assumption \(S\) and \(S'\) differ only in a single minibatch, under the without-replacement sampling scheme there must exist one and only one \(t \in [T]\) such that \(\beta_t = 1\) and \(\beta_j = 0\) for all \(j \in [T]\), \(j \neq t\). Let us define the event \(E_t := \{\beta_t = 1 \text{ and } \beta_j = 0 \text{ for all } j \in [T],\, j \neq t\}\) for all \(t \in [T]\). Then the uniform randomness of \(\xi_t\) implies that
\[
\mathbb{P}(E_t) = \frac{1}{T}, \quad t \in [T].
\]
Given \(t \in [T]\), suppose that \(E_\tau\) occurs for some \(\tau \in [t]\). Then it follows from Lemma 10 that
\[
\|w_t - w'_t\| \le \frac{4\sqrt{2LM}}{n\gamma_\tau} + \sum_{i=1}^{t} 2\sqrt{\frac{2\epsilon_i}{\gamma_i}}.
\]
Suppose instead that \(E_\tau\) occurs for some \(\tau \in \{t+1, t+2, \ldots, T\}\); again it follows from Lemma 10 that
\[
\|w_t - w'_t\| \le \sum_{i=1}^{t} 2\sqrt{\frac{2\epsilon_i}{\gamma_i}}.
\]
Then we have
\[
\mathbb{E}_{\xi_{[t]}}\left[\|w_t - w'_t\|\right] = \sum_{\tau=1}^{T}\mathbb{P}(E_\tau)\,\mathbb{E}\left[\|w_t - w'_t\| \mid E_\tau\right] \le \sum_{\tau=1}^{t}\left( \frac{4\sqrt{2LM}}{nT\gamma_\tau} + \sum_{i=1}^{t}\frac{2}{T}\sqrt{\frac{2\epsilon_i}{\gamma_i}} \right) + \sum_{\tau=t+1}^{T}\sum_{i=1}^{t}\frac{2}{T}\sqrt{\frac{2\epsilon_i}{\gamma_i}} = \sum_{\tau=1}^{t}\left( \frac{4\sqrt{2LM}}{nT\gamma_\tau} + 2\sqrt{\frac{2\epsilon_\tau}{\gamma_\tau}} \right) \le \sum_{t=1}^{T}\left( \frac{4\sqrt{2LM}}{nT\gamma_t} + 2\sqrt{\frac{2\epsilon_t}{\gamma_t}} \right).
\]
It follows that
\[
\mathbb{E}_{\xi_{[T]}}\left[\|\bar{w}_T - \bar{w}'_T\|\right] \le \frac{\sum_{t}\gamma_t\,\mathbb{E}_{\xi_{[t]}}\left[\|w_t - w'_t\|\right]}{\sum_{t}\gamma_t} \le \sum_{t=1}^{T}\left( \frac{4\sqrt{2LM}}{nT\gamma_t} + 2\sqrt{\frac{2\epsilon_t}{\gamma_t}} \right).
\]
The desired result follows immediately as the above bound holds for any pair \(\{S, S'\}\).
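The only probabilistic ingredient in Part (b) is that, under without-replacement sampling, the single differing minibatch is drawn at each time step with probability exactly \(1/T\). A tiny Monte Carlo sanity check of this fact (synthetic, with T = 8; not part of the original analysis):

    import random
    from collections import Counter

    random.seed(3)
    T, trials = 8, 200000
    diff = 0                                  # index of the perturbed minibatch in the original ordering
    counts = Counter()
    for _ in range(trials):
        perm = list(range(T))
        random.shuffle(perm)                  # without-replacement selection order xi_1, ..., xi_T
        counts[perm.index(diff)] += 1         # time step t at which the differing minibatch is drawn
    freqs = [counts[t] / trials for t in range(T)]
    assert all(abs(f - 1.0 / T) < 0.01 for f in freqs)   # empirically close to 1/T for every t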
C.2 Proof of Theorem 6

In this subsection, we prove Theorem 6, which is restated below.

Theorem 6. Suppose that Assumptions 1, 2, 3 hold and the loss function \(\ell\) is bounded in the interval \((0, M]\). Let \(\rho \in (0, 1/4]\) be an arbitrary scalar and set \(\gamma_t = \frac{\lambda\rho t}{4}\). Suppose that \(n \ge \frac{76L}{\lambda\rho}\). Assume that \(\epsilon_t \le \min\left\{ \frac{\epsilon}{n t^4},\, \frac{LM}{\lambda\rho n^2 T^2 t} \right\}\) for some \(\epsilon \in [0, 1]\). Then with probability at least \(1 - \delta\) over \(S\), the weighted average output \(\bar{w}_T\) of M-SPP-SWoR (Algorithm 3) satisfies
\[
\mathbb{E}_{\xi_{[T]}}\left[D(\bar{w}_T, W^*)\right] \lesssim \frac{\sqrt{LM\log(1/\delta)}\,\log(T)}{\lambda\rho\sqrt{nT}} + \sqrt{\frac{\rho\left[R(w_0) - R^*\right]}{\lambda T^2} + \frac{L}{\lambda^2\rho nT} R^* + \frac{\sqrt{\epsilon}}{\lambda T^2}\left( \frac{L}{\lambda\rho} + G\sqrt{\frac{1}{\lambda\rho}} \right)}.
\]
To show this result, we need the following restated McDiarmid's inequality (McDiarmid, 1989), which is also known as the bounded differences inequality.

Lemma 11 (McDiarmid's/bounded differences inequality). Let \(X_1, X_2, \ldots, X_N\) be independent random variables valued in \(\mathcal{X}\). Suppose that the function \(h : \mathcal{X}^N \mapsto \mathbb{R}\) satisfies the bounded differences property, i.e., the following inequality holds for any \(i \in [N]\) and any \(x_1, \ldots, x_N, x'_i\):
\[
\left| h(x_1, \ldots, x_{i-1}, x_i, x_{i+1}, \ldots, x_N) - h(x_1, \ldots, x_{i-1}, x'_i, x_{i+1}, \ldots, x_N) \right| \le c_i.
\]
Then for any \(\varepsilon > 0\),
\[
\mathbb{P}\left( h(X_1, \ldots, X_N) - \mathbb{E}\left[h(X_1, \ldots, X_N)\right] \ge \varepsilon \right) \le \exp\left( -\frac{2\varepsilon^2}{\sum_{i=1}^{N} c_i^2} \right).
\]
Now we are ready to prove Theorem 6.

Proof of Theorem 6. Let \(S = \{S_t\}_{t\in[T]}\) and \(S' = \{S'_t\}_{t\in[T]}\) be two sets of data minibatches such that \(S \doteq S'\). Then, according to Proposition 1, the weighted average outputs \(\bar{w}_T\) and \(\bar{w}'_T\) respectively generated by Algorithm 3 over \(S\) and \(S'\) satisfy
\[
\sup_{S, S'} \mathbb{E}_{\xi_{[T]}}\left[\|\bar{w}_T - \bar{w}'_T\|\right] \le \sum_{t=1}^{T}\left( \frac{4\sqrt{2LM}}{nT\gamma_t} + 2\sqrt{\frac{2\epsilon_t}{\gamma_t}} \right) \le \sum_{t=1}^{T}\frac{5\sqrt{2LM}}{nT\gamma_t} \le \frac{20\sqrt{2LM}\,(1 + \log(T))}{\lambda\rho nT},
\]
where in the last-but-one inequality we have used the condition \(\epsilon_t \le \frac{LM}{4n^2T^2\gamma_t} = \frac{LM}{\lambda\rho n^2T^2 t}\). It follows from the triangle inequality and the above bound that
\[
\sup_{S, S'} \mathbb{E}_{\xi_{[T]}}\left[ \left| D(\bar{w}_T, W^*) - D(\bar{w}'_T, W^*) \right| \right] \le \sup_{S, S'} \mathbb{E}_{\xi_{[T]}}\left[\|\bar{w}_T - \bar{w}'_T\|\right] \le \frac{20\sqrt{2LM}\,(1 + \log(T))}{\lambda\rho nT}.
\]
Since \(\xi_{[T]}\) are independent of \(S\), as a direct consequence of applying McDiarmid's inequality with \(c_i \equiv c = \frac{20\sqrt{2LM}(1 + \log(T))}{\lambda\rho nT}\) to \(h(S) := D(\bar{w}_T, W^*)\), we can show that with probability at least \(1 - \delta\) over the randomness of \(S\),
\[
\mathbb{E}_{\xi_{[T]}}\left[ D(\bar{w}_T, W^*) - \mathbb{E}_{S}\left[ \mathbb{E}_{\xi_{[T]}}\left[ D(\bar{w}_T, W^*) \right] \right] \right] \le c\sqrt{\frac{nT\log(1/\delta)}{2}} = \frac{20\sqrt{LM\log(1/\delta)}\,(1 + \log(T))}{\lambda\rho\sqrt{nT}}.
\]
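The constant in the display above is just the McDiarmid deviation level \(c\sqrt{nT\log(1/\delta)/2}\) evaluated at the stability constant \(c\); the identity can be checked numerically, as in the short Python snippet below (the values assigned to \(L, M, n, T, \lambda, \rho, \delta\) are arbitrary positive placeholders, not constants from the paper).

    import math

    L, M, n, T, lam, rho, delta = 2.0, 1.5, 64, 50, 0.3, 0.25, 0.05   # placeholder values
    c = 20 * math.sqrt(2 * L * M) * (1 + math.log(T)) / (lam * rho * n * T)
    lhs = c * math.sqrt(n * T * math.log(1 / delta) / 2)
    rhs = 20 * math.sqrt(L * M * math.log(1 / delta)) * (1 + math.log(T)) / (lam * rho * math.sqrt(n * T))
    assert abs(lhs - rhs) < 1e-12 * rhs                               # the two expressions coincide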
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Therefore, based on the previous two inequalities we obtain that with probability at least 1 − δ over S, Eξ[T ] [D( ¯wT , W ∗)] ≲ � LM log(1/δ) log(T) λρ √ nT + � ρ [R(w0) − R∗] λT 2 + L λ2ρnT R∗ + √ǫ λT 2 � L λρ + G � 1 λρ � , which gives the desired bound.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' 47 C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='3 Proof of Theorem 7 Here we prove the following restated Theorem 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Theorem 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Suppose that Assumptions 1 and 3 hold and the loss function ℓ is bounded in the interval [0, M].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Set γt ≡ � T n .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Assume that ǫt ≤ LM 4nT 2√ nT .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Then with probability at least 1 − δ over S, the average output ¯wT = 1 T �T t=1 wt of M-SPP (Algorithm 1) satisfies |R( ¯wT ) − RS( ¯wT )| ≲ (LM + G √ LM) log(N) log(1/δ) √ nT + M � log (1/δ) nT .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' We need the following lemma essentially from Bousquet et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' (2020, Corollary 8) that gives a near-tight generalization bound for a learning algorithm that is uniformly stable with respect to loss function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Lemma 12 (Bousquet et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' (2020)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Suppose that a learning algorithm Aw, parameterized by w, satisfies |ℓ(AwS(x), y) − ℓ(AwS′(x), y)| ≤ ̺ for any (x, y) ∈ X × Y and S .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='= S′.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Assume the loss function satisfies 0 ≤ ℓ(y′, y) ≤ M for all y, y′.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content=' Then for any δ ∈ (0, 1), with probability at least 1 − δ over an i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GtE1T4oBgHgl3EQfXQSk/content/2301.03125v1.pdf'} +page_content='i.' 
data set \(S\) of size \(N\),
\[
\left| R(A_{w_S}) - R_S(A_{w_S}) \right| \lesssim \varrho\log(N)\log\left(\frac{1}{\delta}\right) + M\sqrt{\frac{\log(1/\delta)}{N}}.
\]
With this lemma in place, we can prove the main result in Theorem 7.

Proof of Theorem 7. Let \(S = \{S_t\}_{t\in[T]}\) and \(S' = \{S'_t\}_{t\in[T]}\) be two sets of data minibatches satisfying \(S \doteq S'\). Note that \(\gamma_t \equiv \gamma = \sqrt{\frac{T}{n}}\). Then, according to Proposition 1, the average outputs \(\bar{w}_T\) and \(\bar{w}'_T\) respectively generated by Algorithm 1 over \(S\) and \(S'\) satisfy
\[
\sup_{S, S'} \|\bar{w}_T - \bar{w}'_T\| \le \frac{4\sqrt{2LM}}{n\gamma} + \sum_{t=1}^{T} 2\sqrt{\frac{2\epsilon_t}{\gamma}} \le \frac{5\sqrt{2LM}}{n\gamma} = \frac{5\sqrt{2LM}}{\sqrt{nT}},
\]
where in the last-but-one inequality we have used the condition \(\epsilon_t \le \frac{LM}{4nT^2\sqrt{nT}}\). It follows that
\[
\left| \ell(\bar{w}_T; z) - \ell(\bar{w}'_T; z) \right| \le \sqrt{2ML}\,\|\bar{w}_T - \bar{w}'_T\| \le \frac{10LM}{\sqrt{nT}},
\]
where we have used that \(\ell(\cdot\,;\cdot)\) is \(\sqrt{2LM}\)-Lipschitz with respect to its first argument (which is implied by Lemma 2). In view of Assumption 3 we have
\[
\left| r(\bar{w}_T) - r(\bar{w}'_T) \right| \le G\|\bar{w}_T - \bar{w}'_T\| \le \frac{5G\sqrt{2LM}}{\sqrt{nT}}.
\]
The preceding two inequalities indicate that M-SPP is \(\frac{10LM + 5G\sqrt{2LM}}{\sqrt{nT}}\)-uniformly stable with respect to the composite loss function \(\ell + r\). By invoking Lemma 12 for M-SPP we obtain that
\[
\left| R(w_S) - R_S(w_S) \right| \lesssim \frac{\left(LM + G\sqrt{LM}\right)\log(nT)}{\sqrt{nT}}\log\left(\frac{1}{\delta}\right) + M\sqrt{\frac{\log(1/\delta)}{nT}}.
\]
The proof is concluded.
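As a quick check of how the uniform stability constant above is assembled, the Lipschitz composition gives \(\sqrt{2ML}\cdot 5\sqrt{2LM}/\sqrt{nT} = 10LM/\sqrt{nT}\), and adding the regularizer gap yields \((10LM + 5G\sqrt{2LM})/\sqrt{nT}\). The Python snippet below verifies this arithmetic on arbitrary positive placeholder values (not constants from the paper).

    import math

    L, M, G, n, T = 1.7, 2.3, 0.9, 128, 40                         # placeholder values
    param_gap = 5 * math.sqrt(2 * L * M) / math.sqrt(n * T)        # bound on ||w_bar_T - w_bar'_T||
    loss_gap = math.sqrt(2 * L * M) * param_gap                    # sqrt(2LM)-Lipschitz loss
    reg_gap = G * param_gap                                        # G-Lipschitz regularizer (Assumption 3)
    assert abs(loss_gap - 10 * L * M / math.sqrt(n * T)) < 1e-12
    assert abs(loss_gap + reg_gap - (10 * L * M + 5 * G * math.sqrt(2 * L * M)) / math.sqrt(n * T)) < 1e-12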
References

Alekh Agarwal, Peter L. Bartlett, Pradeep Ravikumar, and Martin J. Wainwright. Information-theoretic lower bounds on the oracle complexity of stochastic convex optimization. IEEE Trans. Inf. Theory, 58(5):3235–3249, 2012. 3, 6
Zeyuan Allen-Zhu. Katyusha: The first direct acceleration of stochastic gradient methods. J. Mach. Learn. Res., 18:221:1–221:51, 2017. 7
Mihai Anitescu. Degenerate nonlinear programming with a quadratic growth condition. SIAM J. Optim., 10(4):1116–1135, 2000. 9
Hilal Asi and John C. Duchi. The importance of better models in stochastic optimization. Proceedings of the National Academy of Sciences, 116(46):22924–22930, 2019a. 2, 6, 13, 35
Hilal Asi and John C. Duchi. Stochastic (approximate) proximal point methods: Convergence, optimality, and adaptivity. SIAM J. Optim., 29(3):2257–2290, 2019b. 2, 3, 6, 13, 18, 20, 35
Hilal Asi, Karan N. Chadha, Gary Cheng, and John C. Duchi. Minibatch stochastic approximate proximal point methods. In Advances in Neural Information Processing Systems (NeurIPS), virtual, 2020. 2, 3, 5, 7, 18, 20, 24
Francis R. Bach and Eric Moulines. Non-asymptotic analysis of stochastic approximation algorithms for machine learning. In Advances in Neural Information Processing Systems (NIPS), pages 451–459, Granada, Spain, 2011. 6
Peter L. Bartlett, Olivier Bousquet, and Shahar Mendelson. Local Rademacher complexities. The Annals of Statistics, 33(4):1497–1537, 2005. 7
Raef Bassily, Vitaly Feldman, Cristóbal Guzmán, and Kunal Talwar. Stability of stochastic gradient descent on nonsmooth convex losses. In Advances in Neural Information Processing Systems (NeurIPS), virtual, 2020. 8
Dimitri P. Bertsekas. Incremental proximal methods for large scale convex optimization. Math. Program., 129(2):163–195, 2011. 3, 6, 18
Léon Bottou and Olivier Bousquet. The tradeoffs of large scale learning. In Advances in Neural Information Processing Systems (NIPS), pages 161–168, Vancouver, Canada, 2007. 7
Léon Bottou, Frank E. Curtis, and Jorge Nocedal. Optimization methods for large-scale machine learning. SIAM Rev., 60(2):223–311, 2018. 6
Olivier Bousquet and André Elisseeff. Stability and generalization. J. Mach. Learn. Res., 2:499–526, 2002. 4, 7, 16
Olivier Bousquet, Yegor Klochkov, and Nikita Zhivotovskiy. Sharper bounds for uniformly stable algorithms. In Proceedings of the Conference on Learning Theory (COLT), pages 610–626, Virtual Event [Graz, Austria], 2020. 7, 48
Karan N. Chadha, Gary Cheng, and John C. Duchi. Accelerated, optimal and parallel: Some results on model-based stochastic optimization. In Proceedings of the International Conference on Machine Learning (ICML), pages 2811–2827, Baltimore, MD, 2022. 25
Ronan Collobert, Samy Bengio, and Yoshua Bengio. A parallel mixture of SVMs for very large scale problems. In Advances in Neural Information Processing Systems (NIPS), pages 633–640, Vancouver, Canada, 2001. 23
Koby Crammer, Ofer Dekel, Joseph Keshet, Shai Shalev-Shwartz, and Yoram Singer. Online passive-aggressive algorithms. J. Mach. Learn. Res., 7:551–585, 2006. 6
Damek Davis and Dmitriy Drusvyatskiy. Stochastic model-based minimization of weakly convex functions. SIAM J. Optim., 29(1):207–239, 2019. 2, 3, 4, 5, 6, 11, 18, 20
Aaron Defazio, Francis R. Bach, and Simon Lacoste-Julien. SAGA: A fast incremental gradient method with support for non-strongly convex composite objectives. In Advances in Neural Information Processing Systems (NIPS), pages 1646–1654, Montreal, Canada, 2014. 7
Qi Deng and Wenzhi Gao. Minibatch and momentum model-based methods for stochastic weakly convex optimization. In Advances in Neural Information Processing Systems (NeurIPS), pages 23115–23127, virtual, 2021. 7, 8, 25
Aymeric Dieuleveut, Nicolas Flammarion, and Francis R. Bach. Harder, better, faster, stronger convergence rates for least-squares regression. J. Mach. Learn. Res., 18:101:1–101:51, 2017. 4, 18, 19, 20
Dmitriy Drusvyatskiy and Adrian S. Lewis. Error bounds, quadratic growth, and linear convergence of proximal methods. Math. Oper. Res., 43(3):919–948, 2018. 4
John C. Duchi and Feng Ruan. Stochastic methods for composite and weakly convex optimization problems. SIAM J. Optim., 28(4):3229–3259, 2018. 18
John C. Duchi, Shai Shalev-Shwartz, Yoram Singer, and Ambuj Tewari. Composite objective mirror descent. In Proceedings of the 23rd Conference on Learning Theory (COLT), pages 14–26, Haifa, Israel, 2010. 6
Vitaly Feldman and Jan Vondrák. Generalization bounds for uniformly stable algorithms. In Advances in Neural Information Processing Systems (NeurIPS), pages 9770–9780, Montréal, Canada, 2018. 4, 7
Vitaly Feldman and Jan Vondrák. High probability generalization bounds for uniformly stable algorithms with nearly optimal rate. In Proceedings of the Conference on Learning Theory (COLT), pages 1270–1279, Phoenix, AZ, 2019. 7, 16
Roy Frostig, Rong Ge, Sham M. Kakade, and Aaron Sidford. Competing with the empirical risk minimizer in a single pass. In Proceedings of the 28th Conference on Learning Theory (COLT), pages 728–763, Paris, France, 2015. 7
Saeed Ghadimi and Guanghui Lan. Optimal stochastic approximation algorithms for strongly convex stochastic composite optimization I: A generic algorithmic framework. SIAM J. Optim., 22(4):1469–1492, 2012. 6
Isabelle Guyon, Steve R. Gunn, Asa Ben-Hur, and Gideon Dror. Result analysis of the NIPS 2003 feature selection challenge. In Advances in Neural Information Processing Systems (NIPS), pages 545–552, Vancouver, Canada, 2004. 23
Moritz Hardt, Ben Recht, and Yoram Singer. Train faster, generalize better: Stability of stochastic gradient descent. In Proceedings of the 33rd International Conference on Machine Learning (ICML), pages 1225–1234, New York City, NY, 2016. 7, 16
Chonghai Hu, James T. Kwok, and Weike Pan. Accelerated gradient methods for stochastic optimization and online learning. In Advances in Neural Information Processing Systems (NIPS), pages 781–789, Vancouver, Canada, 2009. 6
Martin Jaggi, Virginia Smith, Martin Takáč, Jonathan Terhorst, Sanjay Krishnan, Thomas Hofmann, and Michael I. Jordan. Communication-efficient distributed dual coordinate ascent. In Advances in Neural Information Processing Systems (NIPS), pages 3068–3076, Montreal, Canada, 2014. 7
Rie Johnson and Tong Zhang. Accelerating stochastic gradient descent using predictive variance reduction. In Advances in Neural Information Processing Systems (NIPS), pages 315–323, Lake Tahoe, NV, 2013. 3, 7
Ellango Jothimurugesan, Ashraf Tahmasbi, Phillip B. Gibbons, and Srikanta Tirthapura. Variance-reduced stochastic gradient descent on streaming data. In Advances in Neural Information Processing Systems (NeurIPS), pages 9928–9937, Montréal, Canada, 2018. 7
Hamed Karimi, Julie Nutini, and Mark Schmidt. Linear convergence of gradient and proximal-gradient methods under the Polyak-Łojasiewicz condition. In Proceedings of the European Conference on Machine Learning and Knowledge Discovery in Databases (ECML/PKDD), Part I, pages 795–811, Riva del Garda, Italy, 2016. 4, 9
Hiroyuki Kasai. SGDLibrary: A MATLAB library for stochastic optimization algorithms. J. Mach. Learn. Res., 18:215:1–215:5, 2017. 23
Yegor Klochkov and Nikita Zhivotovskiy. Stability and deviation optimal risk bounds with convergence rate o(1/n). In Advances in Neural Information Processing Systems (NeurIPS), pages 5065–5076, virtual, 2021. 7
Vladimir Koltchinskii. Local Rademacher complexities and oracle inequalities in risk minimization. The Annals of Statistics, 34(6):2593–2656, 2006. 7
Brian Kulis and Peter L. Bartlett. Implicit online learning. In Proceedings of the 27th International Conference on Machine Learning (ICML), pages 575–582, Haifa, Israel, 2010. 6
Andrei Kulunchakov and Julien Mairal. A generic acceleration framework for stochastic composite optimization. In Advances in Neural Information Processing Systems (NeurIPS), pages 12556–12567, Vancouver, Canada, 2019. 6
Guanghui Lan. An optimal method for stochastic composite optimization. Math. Program., 133(1-2):365–397, 2012. 6
Jason D. Lee, Qihang Lin, Tengyu Ma, and Tianbao Yang. Distributed stochastic variance reduced gradient methods by sampling extra data with replacement. J. Mach. Learn. Res., 18:122:1–122:43, 2017. 7
Erich L. Lehmann and George Casella. Theory of point estimation. Springer Science & Business Media, 2006. 7
Yunwen Lei and Yiming Ying. Fine-grained analysis of stability and generalization for stochastic gradient descent. In Proceedings of the 37th International Conference on Machine Learning (ICML), pages 5809–5819, Virtual Event, 2020. 7, 11, 13, 19, 20
Mu Li, Tong Zhang, Yuqiang Chen, and Alexander J. Smola. Efficient mini-batch training for stochastic optimization. In Proceedings of the 20th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), pages 661–670, New York, NY, 2014. 7, 11, 24
C. McDiarmid. On the method of bounded differences. In Surveys in Combinatorics, 1989. 46
Song Mei, Yu Bai, and Andrea Montanari. The landscape of empirical risk for nonconvex losses. The Annals of Statistics, 46(6A):2747–2774, 2018. 7
Sayan Mukherjee, Partha Niyogi, Tomaso A. Poggio, and Ryan M. Rifkin. Learning theory: stability is sufficient for generalization and necessary and sufficient for consistency of empirical risk minimization. Adv. Comput. Math., 25(1-3):161–193, 2006. 7
Sahand N. Negahban, Pradeep Ravikumar, Martin J. Wainwright, and Bin Yu. A unified framework for high-dimensional analysis of M-estimators with decomposable regularizers. Statistical Science, 27(4):538–557, 2012. 2
Arkadi Nemirovski, Anatoli B. Juditsky, Guanghui Lan, and Alexander Shapiro. Robust stochastic approximation approach to stochastic programming. SIAM J. Optim., 19(4):1574–1609, 2009. 6
Arkadii S. Nemirovskii and Yu. E. Nesterov. Optimal methods of smooth convex minimization. USSR Computational Mathematics and Mathematical Physics, 25(2):21–30, 1985. 12
Andrei Patrascu and Ion Necoara. Nonasymptotic convergence of stochastic proximal point methods for constrained convex optimization. J. Mach. Learn. Res., 18:198:1–198:42, 2017. 2, 3, 5, 6, 18, 20
Alexander Rakhlin, Ohad Shamir, and Karthik Sridharan. Making gradient descent optimal for strongly convex stochastic optimization. In Proceedings of the 29th International Conference on Machine Learning (ICML), Edinburgh, Scotland, UK, 2012. 4, 6, 18, 20
Pradeep Ravikumar, John Lafferty, Han Liu, and Larry Wasserman. Sparse additive models. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 71(5):1009–1030, 2009. 2
James Renegar and Benjamin Grimmer. A simple nearly optimal restart scheme for speeding up first-order methods. Found. Comput. Math., 22(1):211–256, 2022. 12
Herbert Robbins and Sutton Monro. A stochastic approximation method. The Annals of Mathematical Statistics, pages 400–407, 1951. 6
Herbert Robbins and David Siegmund. A convergence theorem for nonnegative almost supermartingales and some applications. In Optimizing Methods in Statistics, pages 233–257, 1971. 35
Mark Schmidt, Nicolas Le Roux, and Francis R. Bach. Convergence rates of inexact proximal-gradient methods for convex optimization. In Advances in Neural Information Processing Systems (NIPS), pages 1458–1466, Granada, Spain, 2011. 40
Shai Shalev-Shwartz, Ohad Shamir, Nathan Srebro, and Karthik Sridharan. Learnability, stability and uniform convergence. J. Mach. Learn. Res., 11:2635–2670, 2010. 7, 11
Ohad Shamir, Nathan Srebro, and Tong Zhang. Communication-efficient distributed optimization using an approximate Newton-type method. In Proceedings of the 31st International Conference on Machine Learning (ICML), pages 1000–1008, Beijing, China, 2014. 3, 7
Nathan Srebro, Karthik Sridharan, and Ambuj Tewari. Smoothness, low noise and fast rates. In Advances in Neural Information Processing Systems (NIPS), pages 2199–2207, Vancouver, Canada, 2010. 7, 8, 10, 11, 13, 19, 20, 27
Panos Toulis and Edoardo M. Airoldi. Asymptotic and finite-sample properties of estimators based on stochastic gradients. The Annals of Statistics, 45(4):1694–1727, 2017. 5, 6
Panos Toulis, Dustin Tran, and Edoardo M. Airoldi. Towards stability and optimality in stochastic gradient descent. In Proceedings of the 19th International Conference on Artificial Intelligence and Statistics (AISTATS), pages 1290–1298, Cadiz, Spain, 2016. 6, 18
Alexandre B. Tsybakov. Introduction to nonparametric estimation. Springer Science & Business Media, 2008. 5, 18
Sara A. Van de Geer. High-dimensional generalized linear models and the lasso. The Annals of Statistics, 36(2):614–645, 2008. 2
Vladimir Vapnik. An overview of statistical learning theory. IEEE Trans. Neural Networks, 10(5):988–999, 1999. 7
Martin J. Wainwright. Sharp thresholds for high-dimensional and noisy sparsity recovery using ℓ1-constrained quadratic programming (Lasso). IEEE Trans. Inf. Theory, 55(5):2183–2202, 2009. 21
Jialei Wang, Mladen Kolar, Nathan Srebro, and Tong Zhang. Efficient distributed learning with sparsity. In Proceedings of the 34th International Conference on Machine Learning (ICML), pages 3636–3645, Sydney, Australia, 2017a. 7, 24
Jialei Wang, Weiran Wang, and Nathan Srebro. Memory and communication efficient distributed stochastic optimization with minibatch prox. In Proceedings of the 30th Conference on Learning Theory (COLT), pages 1882–1919, Amsterdam, The Netherlands, 2017b. 2, 3, 4, 5, 7, 8, 10, 11, 13, 14, 19, 20, 26
Blake E. Woodworth and Nathan Srebro. An even more optimal stochastic optimization algorithm: Minibatching and interpolation learning. In Advances in Neural Information Processing Systems (NeurIPS), pages 7333–7345, virtual, 2021. 4, 19, 20
Lin Xiao and Tong Zhang. A proximal stochastic gradient method with progressive variance reduction. SIAM J. Optim., 24(4):2057–2075, 2014. 3, 7, 14
Yang You, Jing Li, Sashank J. Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training BERT in 76 minutes. In Proceedings of the 8th International Conference on Learning Representations (ICLR), Addis Ababa, Ethiopia, 2020. 11
Xiao-Tong Yuan and Ping Li. On convergence of distributed approximate Newton methods: Globalization, sharper bounds and beyond. J. Mach. Learn. Res., 21:206:1–206:51, 2020. 3
Xiao-Tong Yuan and Ping Li. Stability and risk bounds of iterative hard thresholding. IEEE Trans. Inf. Theory, 68(10):6663–6681, 2022. 8
Lijun Zhang, Tianbao Yang, and Rong Jin. Empirical risk minimization for stochastic convex optimization: O(1/n)- and O(1/n²)-type of risk bounds. In Conference on Learning Theory, pages 1954–1979, 2017. 7, 19, 20
Tong Zhang. Leave-one-out bounds for kernel methods. Neural Comput., 15(6):1397–1437, 2003. 7
Tong Zhang. Solving large scale linear prediction problems using stochastic gradient descent algorithms. In Proceedings of the Twenty-first International Conference on Machine Learning (ICML), Banff, Alberta, Canada, 2004. 6
Yuchen Zhang and Xiao Lin. DiSCO: Distributed optimization for self-concordant empirical loss. In Proceedings of the 32nd International Conference on Machine Learning (ICML), pages 362–370, Lille, France, 2015. 3, 7
Kaiwen Zhou, Lai Tian, Anthony Man-Cho So, and James Cheng. Practical schemes for finding near-stationary points of convex finite-sums. In Proceedings of the International Conference on Artificial Intelligence and Statistics (AISTATS), pages 3684–3708, Virtual Event, 2022. 12
Pan Zhou, Xiaotong Yuan, Huan Xu, Shuicheng Yan, and Jiashi Feng. Efficient meta learning via minibatch proximal update. In Advances in Neural Information Processing Systems (NeurIPS), pages 1532–1542, Vancouver, Canada, 2019. 2
diff --git a/HNAyT4oBgHgl3EQffPjW/content/tmp_files/2301.00338v1.pdf.txt b/HNAyT4oBgHgl3EQffPjW/content/tmp_files/2301.00338v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..92716e766c4b0fed1b62baae1d6e84c130e6e244
--- /dev/null
+++ b/HNAyT4oBgHgl3EQffPjW/content/tmp_files/2301.00338v1.pdf.txt
@@ -0,0 +1,547 @@
+Unification of thermal and quantum noise in gravitational-wave detectors
+Chris Whittle,1,∗ Lee McCuller,2 Vivishek Sudhir,1,3,† and Matthew Evans1
+1LIGO, Massachusetts Institute of Technology, Cambridge, MA 02139, USA
+2LIGO, California Institute of Technology, Pasadena, CA 91125, USA
+3Department of Mechanical Engineering, Massachusetts Institute of Technology, Cambridge, MA 02139
+(Dated: January 3, 2023)
+Contemporary gravitational-wave detectors are fundamentally limited by thermal noise—due to dissipation in the mechanical elements of the test mass—and quantum noise—from the vacuum fluctuations of the optical field used to probe the test mass position. Two other fundamental noises can in principle also limit sensitivity: test-mass quantization noise due to the zero-point fluctuation of its mechanical modes, and thermal excitation of the optical field. We use the quantum fluctuation-dissipation theorem to unify all four noises. This unified picture shows precisely when test-mass quantization noise and optical thermal noise can be ignored.
+Introduction.—Fundamental constraints on the sensitivity of gravitational-wave (GW) detectors arise from classical and quantum fluctuations. At present, each of these noises is modeled using different techniques. Thermal noise—due to the Brownian motion of the mechanical test mass, its suspension, and the mirror coating on the test mass—is derived from the (classical) fluctuation-dissipation theorem [1, 2]. Quantum noise—due to the vacuum fluctuations in the phase and amplitude of the optical field used to measure the test mass position—is derived from quantum electrodynamics in the so-called "two-photon formalism" [3–7]. The sum of these noises limits the performance of today's GW detectors: quantum fluctuations in the amplitude of the optical field drive the motion of the test mass in the ∼20–50 Hz range [8, 9], Brownian motion of the mirror coatings dominates in the ∼50–200 Hz band [10], and quantum fluctuations in the phase of the optical field set the sensitivity above 200 Hz [11, 12].
+In principle there exist noises that are exactly complementary, i.e. quantum noise of the mechanical degrees of freedom and thermal noise of the optical field. The former is a consequence of quantizing the mechanical motion of the interferometer test masses and the zero-point fluctuations that manifest as a result. In fact, Braginsky et al. studied the role of test-mass quantization noise [13], concluding that "test-mass quantization is irrelevant [...] if one filters the output data appropriately". On the other hand, thermal fluctuations of the optical field—for example due to blackbody radiation—can contribute excess noise.
+We show that the four fundamental noises described above—thermal and quantum noises of the mechanical and optical degrees of freedom—can all be treated uniformly using the quantum extension of the fluctuation-dissipation theorem [14–19]. This perspective enables a simple treatment of the test-mass quantum noise that is independent of the detector topology, instead depending only on the relative thermal and optical quantum energy scales. In doing so, we extend the analysis of Ref. [13] to incorporate mechanical losses and to allow for differing GW detector optical topologies or arbitrarily complex test mass suspensions. We find that test-mass quantization noise is negligible in principle—i.e. independent of any "filtering"—so long as the mechanical degrees of freedom of the test mass resonate at acoustic frequencies (Ωm) and the detector is operated at a temperature
+    T > ℏΩm/kB ≈ (5 × 10⁻¹¹ K) · (Ωm / (2π · 1 Hz)).
+Likewise, optical thermal noise at the carrier frequency ωo is negligible compared to its quantum noise as long as GW detectors are operated at a temperature
+    T < ℏωo/kB ≈ (14 × 10³ K) · (ωo / (2π · 300 THz)).
+Quantum fluctuation-dissipation theorem.—A system in thermal equilibrium at temperature T can be modeled by the coupling of its observables to a noisy force from the environment. In the simplest case of a single observable x̂, this coupling can be described by an interaction Hamiltonian Ĥint = −x̂ f̂x, where f̂x is the generalized force conjugate to the system operator x̂ originating from the system's quantum and thermal environmental fluctuations. In the linear response regime, the quantum fluctuation-dissipation theorem (FDT) states that the (symmetrized double-sided) power spectral density of any system observable ŷ is [15, Eq. (7.2)]
+    S̄yy[ω] = ℏ coth(ℏω/(2kBT)) Im χyx[ω],    (1)
+where χyx is the susceptibility of the observable ŷ to the generalized force f̂x, i.e. ŷ[ω] = χyx[ω] f̂x[ω]. Using the identity coth(α/2) = 1 + 2(e^α − 1)⁻¹, we rewrite this as
+    S̄yy[ω] = ℏ (2nth[ω] + 1) Im χyx[ω],    (2)
+where nth[ω] ≡ (e^(ℏω/kBT) − 1)⁻¹ is the Bose-Einstein occupation number.
+We define the quantum noise (QN) in ŷ to be its fluctuations at zero temperature:
+    S̄yy^QN[ω] ≡ lim_{T→0} S̄yy[ω] = ℏ Im χyx[ω],    (3)
+also called the zero-point fluctuations.
+[Figure 1: schematic power spectral densities for the mechanical modes x̂ at acoustic frequencies Ωm (Hz to kHz, the "hot" regime kBT ≫ ℏΩm) and the optical modes Ê at optical frequencies ωo ∼ 10¹⁴ Hz (the "cold" regime kBT ≪ ℏωo), with mechanically induced sidebands on the carrier.]
+FIG. 1. A qualitative depiction of noise terms arising from the general fluctuation-dissipation theorem, coupling into each of the mechanical modes x̂ and optical modes Ê. At low frequencies—up to ∼kHz—contributions to the mechanical motion of the test masses are plotted as power spectra. At these frequencies, thermal noise (red) dominates over the zero-point fluctuations of the test masses (purple). At high frequencies—in the ∼THz range—the optical power spectrum is shown. Here, the optical vacuum fluctuations (purple) are the relevant effect; the thermal occupation of optical modes (red) is exponentially suppressed. For each noise curve, the relevant prefactors to the susceptibilities Im χ [from eq. (3); eqs. (4) and (6)] are also shown. Gray shows the optical sidebands due to mechanical motion.
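As a quick numerical illustration of eqs. (1)-(3), the sketch below evaluates the coth prefactor and the Bose-Einstein occupation for a generic viscously damped oscillator. The oscillator parameters and the viscous damping model are illustrative assumptions introduced here, not values or models taken from the paper.

```python
import numpy as np

hbar = 1.054571817e-34  # reduced Planck constant, J s
kB = 1.380649e-23       # Boltzmann constant, J / K

def n_th(omega, T):
    """Bose-Einstein occupation n_th[omega] = (exp(hbar*omega/(kB*T)) - 1)^-1."""
    return 1.0 / np.expm1(hbar * omega / (kB * T))

def fdt_psd(omega, T, im_chi):
    """Symmetrized double-sided PSD of eq. (2): hbar*(2*n_th + 1)*Im(chi)."""
    return hbar * (2.0 * n_th(omega, T) + 1.0) * im_chi

# Illustrative viscously damped oscillator (not the structurally damped model used later)
m, Om, Gamma, T = 1e-3, 2 * np.pi * 100.0, 2 * np.pi * 0.1, 300.0
omega = 2 * np.pi * np.linspace(1.0, 1e3, 1000)
chi = 1.0 / (m * (Om**2 - omega**2 - 1j * omega * Gamma))

S_total = fdt_psd(omega, T, np.imag(chi))  # thermal plus quantum fluctuations
S_qn = hbar * np.imag(chi)                 # zero-temperature limit, eq. (3)
S_tn = S_total - S_qn                      # remaining T-dependent (thermal) part

# Identity used between eqs. (1) and (2): coth(hbar*omega/(2*kB*T)) = 2*n_th + 1
assert np.allclose(1.0 / np.tanh(hbar * omega / (2.0 * kB * T)),
                   2.0 * n_th(omega, T) + 1.0)
```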
+The thermal noise (TN) is then the remaining T-dependent term in eq. (2),
+    S̄yy^TN[ω] ≡ ℏ · 2nth[ω] Im χyx[ω].    (4)
+Indeed, in the regime where the thermal energy dominates (kBT ≫ ℏω), we have nth ≈ kBT/ℏω ≫ 1 and recover the classical FDT result, S̄yy ≈ S̄yy^TN ≈ (2kBT/ω) Im χyx.
+The power of the FDT is that mere knowledge of the susceptibility—an object accessible to classical experimenters—dictates all fundamental (i.e. quantum and thermal) noises of interest. Even further, it implies that the thermal and quantum noises are directly related to each other as
+    S̄yy^TN[ω] = 2nth[ω] S̄yy^QN[ω].    (5)
+Thus, one can be bootstrapped from the other, even without direct knowledge of the susceptibility.
+For a system in either the "cold" or "hot" regime, we can approximate the occupation number as
+    nth[ω] ≈ e^(−ℏω/kBT)  for kBT ≪ ℏω ("cold"),  and  nth[ω] ≈ kBT/ℏω  for kBT ≫ ℏω ("hot"),    (6)
+in eq. (5) to relate the known quantum noise to the thermal noise and vice versa. In contemporary GW detectors, the mechanical and optical modes are respectively in the hot and cold regimes. Thus, the known TN in the mechanical degrees of freedom—calculated independently using the classical FDT [1, 2]—can be used to estimate the mechanical QN:
+    S̄yy^QN,mech[ω] ≈ (ℏω/(2kBT)) S̄yy^TN,mech[ω].    (7)
+Similarly, the known QN in the optical field—calculated independently, say from input-output relations [5, 6]—can be used to estimate the optical TN:
+    S̄yy^TN,opt[ω] ≈ 2 e^(−ℏω/kBT) · S̄yy^QN,opt[ω].    (8)
+Figure 1 qualitatively shows the well-understood mechanical TN and optical QN, as well as the bootstrapped mechanical quantum and optical thermal noises. In the following we discuss the specifics of each of the mechanical and optical degrees of freedom in GW detectors.
+Test-mass quantization.—The test masses in GW detectors are engineered to be acoustic-frequency mechanical oscillators. Their simplest description is through a lumped element model of a mechanical force f̂x—by definition conjugate to the displacement x̂—driving the displacement, i.e. x̂[Ω] = χxx[Ω] f̂x[Ω]. Given that the test mass pendulum mode is structurally damped, the damping rate is Γm[Ω] = Ωm²/(ΩQ), where Ωm is the mechanical resonance frequency and Q the mode quality factor [1]. The pendulum mode susceptibility is then
+    χxx⁻¹[Ω] = m(−Ω² + Ωm² − iΩm²/Q).    (9)
+It is precisely the interaction of the test mass oscillator with its environment—and the concomitant spreading of its susceptibility in frequency—that was assumed to be negligible in the analysis of Braginsky et al. [13]. Accounting for it consistently using the quantum FDT shows that the zero-point motion of the oscillator is the mechanical quantum noise:
+    S̄xx^QN[Ω] = (ℏ/m) · (Ωm²/Q) / [(Ω² − Ωm²)² + (Ωm²/Q)²].    (10)
+Even in this simple model, the quantum noise of the test mass is a broadband displacement noise [18, 20] that cannot, prima facie, be "filtered" out as asserted by Braginsky et al. [13]. In Advanced LIGO for example [21], because of the low frequency and low loss of the test mass's pendulum mode (Ωm ≈ 2π · 0.4 Hz and Q ≈ 10⁸), this model predicts the off-resonant test-mass quantum noise:
+    √(S̄xx^QN[Ω ≫ Ωm]) ≈ 10⁻²⁵ m/√Hz · (Ω / (2π · 10 Hz))⁻²,
+six orders of magnitude smaller than the thermal noise.
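The quoted off-resonant level can be reproduced directly from eq. (10). A minimal sketch follows; the pendulum frequency and quality factor are the values given above, while the 40 kg test-mass value is an assumption introduced here for illustration, since no mass is quoted in the text.

```python
import numpy as np

hbar = 1.054571817e-34  # J s

def S_xx_qn(Omega, m, Om_m, Q):
    """Zero-point displacement PSD of the pendulum mode, eq. (10), in m^2/Hz."""
    return (hbar / m) * (Om_m**2 / Q) / ((Omega**2 - Om_m**2)**2 + (Om_m**2 / Q)**2)

# Pendulum parameters from the text; the 40 kg mass is an assumed illustrative value.
m, Om_m, Q = 40.0, 2 * np.pi * 0.4, 1e8
Omega = 2 * np.pi * 10.0  # evaluate well above resonance, at 10 Hz

asd = np.sqrt(S_xx_qn(Omega, m, Om_m, Q))
print(f"sqrt(S_xx_QN) at 10 Hz ~ {asd:.1e} m/rtHz")  # ~1e-25 m/rtHz, matching the text

# Off resonance the amplitude spectral density falls as Omega^-2:
ratio = np.sqrt(S_xx_qn(2 * Omega, m, Om_m, Q) / S_xx_qn(Omega, m, Om_m, Q))
print(f"ASD(20 Hz) / ASD(10 Hz) ~ {ratio:.2f}  (expect ~0.25)")
```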
+In reality, the test masses (and their suspensions) are not lumped elements. They are vibrating elastic continua; further, in interferometric GW detectors, test masses have mirror coatings which have their own elastic fluctuations. Although a lumped element treatment of the susceptibility is not possible in this case, susceptibilities that describe the thermal noise can nevertheless be derived [2]. This is precisely where our earlier observations are helpful. Since the relevant frequencies Ω ≈ 2π · (0.1–10³) Hz and the operating temperature satisfies T ≫ ℏΩ/kB, these modes are in the "hot" regime. Thus, knowledge of the thermal noise allows a direct and accurate prediction of the broadband mechanical quantum noise using eq. (7). The dashed purple line in fig. 2 shows the broadband mechanical quantum noise in Advanced LIGO bootstrapped from the well-modeled mechanical thermal noise (red solid line). Note that the broadband mechanical quantum noise is relatively white, in contrast to the 1/Ω falloff of the thermal noise amplitude spectral density, a consequence of the frequency prefactor in eq. (7). The dashed orange line is the prediction from a lumped element model of the pendulum mode alone [eq. (10)]. Clearly, the displacement quantum noise is broadband, but negligible compared to the corresponding thermal noise—a fact that is contingent on the operating temperature.
+Optical thermal noise.—Information about the motion of the test masses is imprinted onto electromagnetic fields that propagate through the GW detector. Typically, the incident field has a carrier at frequency ωo, while all relevant information is contained in field fluctuations at frequency offsets Ω around the carrier that are small compared to ωo (i.e. |Ω| ≪ ωo). Thus we are interested in S̄EE[ωo + Ω], which is given by the quantum FDT
+    S̄EE[ωo + Ω] = ℏ (2nth[ωo + Ω] + 1) Im χEE[ωo + Ω].
+[Figure 2: displacement amplitude spectral density (m/√Hz) versus frequency (1–1000 Hz), with curves for the total, optical quantum, test-mass thermal, pendulum-mode quantum, test-mass quantum, and optical thermal noises.]
+FIG. 2. Gray shows the design sensitivity of Advanced LIGO, which is dominated by mechanical thermal noise (red solid) up to ∼200 Hz and by optical quantum noise (purple solid) above that frequency [21]. Orange dashed is the predicted mechanical quantum noise in a simplified model of the test-mass pendulum [eq. (10)], while purple dashed shows the prediction [eq. (7)] for the full mechanical degree of freedom. The difference in shape between the test-mass quantum and thermal noises is a result of the frequency-dependent factor in eq. (7). Red dashed is the optical thermal noise predicted from the known optical quantum noise using eq. (11).
+The optical fields in current interferometric GW detectors are in the "cold" regime with respect to the optical carrier and "hot" with respect to the offset frequency Ω, i.e. ℏΩ ≪ kBT ≪ ℏωo. Thus field fluctuations around the carrier are quantified by
+    1 + 2nth[ωo + Ω] ≈ 1 + 2 e^(−ℏωo/kBT) (1 + ℏΩ/kBT),
+which consists of a dominant quantum noise term [19, 22, 23] with an exponentially small thermal noise contribution. Thus the thermal noise around the electric field carrier is related to the quantum noise by
+    S̄EE^TN[ωo + Ω] ≈ 2 e^(−ℏωo/kBT) (1 + ℏΩ/kBT) S̄EE^QN[ωo + Ω].    (11)
+In Advanced LIGO, the quantum noise contribution of the optical field fluctuations is well-characterized (see purple solid line in fig. 2).
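The size of the exponential suppression entering eq. (11) is easy to evaluate. The sketch below assumes a 300 THz carrier (the frequency scale used earlier in the text) and room-temperature operation; both are illustrative inputs that only set the exponent ℏωo/kBT.

```python
import numpy as np

hbar = 1.054571817e-34  # J s
kB = 1.380649e-23       # J / K

def optical_tn_over_qn(omega_o, Omega, T):
    """Ratio S_TN / S_QN for the optical field, from eq. (11)."""
    return 2.0 * np.exp(-hbar * omega_o / (kB * T)) * (1.0 + hbar * Omega / (kB * T))

omega_o = 2 * np.pi * 300e12  # ~300 THz optical carrier, the scale used in the text
Omega = 2 * np.pi * 100.0     # 100 Hz offset, in the GW measurement band
T = 300.0                     # room-temperature operation (assumed)

print(f"hbar*omega_o/(kB*T) = {hbar * omega_o / (kB * T):.0f}")              # ~48
print(f"optical S_TN / S_QN ~ {optical_tn_over_qn(omega_o, Omega, T):.1e}")  # ~3e-21
```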
+Applying eq. (11) allows a direct extrapolation of the optical thermal noise (red dashed line in fig. 2), which is shown to be negligible, even when compared to the already small mechanical quantum noise.
+Some designs for future upgrades to detectors and next-generation installations employ cryogenic technologies. However, the temperature will only be reduced by an order of magnitude or two [24–26] and the implications of fig. 2 will be unchanged. A contrasting example can be found in superfluid helium-4 Weber bar antennae, which represent an altogether different technology that breaks into a new operational regime. Here, the (tuneable) mechanical resonance of the superfluid at ∼1 kHz couples to a microwave cavity resonant at 10.6 GHz [27, 28]. The microwave readout circuit spans temperatures from 50 mK to room temperature. For temperatures above 0.5 K within this circuit, the microwave modes enter the hot regime and their thermal noise becomes significant compared to quantum fluctuations.
+Opto-/electro-mechanical interactions.—Figure 2 shows the noises that contribute to the free-running displacement x̂0 of the test masses, where the effect of optical and electrical feedback has been removed by the calibration process. Here, we explain why the specifics of such feedback do not affect our analysis as far as metrology is concerned.
+The feedback force on the oscillator can be written as
+    f̂fb[Ω] = χxx,fb⁻¹[Ω] x̂[Ω],
+where χxx,fb⁻¹ is the displacement-to-force open-loop transfer function, and we have omitted the noise component of the feedback since it has no effect on calibration. This feedback may be electro-optic control loops or direct optical feedback from detuned interaction [29]. The displacement fluctuations due to the FDT [eq. (2)] can equivalently be written as force fluctuations f̂x, with spectrum
+    S̄fxfx[Ω] = ℏ (2nth[Ω] + 1) Im(−χyx⁻¹[Ω]),    (12)
+that sum with the feedback force f̂fb.
+As a result of the feedback, the test-mass susceptibility is modified from its intrinsic form χxx [see eq. (9)] to an effective (i.e. closed-loop) susceptibility, χxx,cl ≡ χxx/(1 − χxx χxx,fb⁻¹). The displacement observed with the loop closed is then x̂cl ≡ χxx,cl f̂x. This can be extended to also include the GW signal, which couples to the test mass displacement via a force f̂gw [30]. The free-running displacement is then inferred as
+    x̂0 = x̂cl (1 − χxx χxx,fb⁻¹) = χxx (f̂x + f̂gw),
+where we drop the frequency dependence of each term for brevity. By convention, the spectrum S̄ff of the noise f̂x is calibrated to a displacement spectrum and then plotted as in fig. 2. Since the effect of the feedback is common to all forces, our ability to measure f̂gw depends only on the force noise f̂x and not on the behavior of the feedback system. In the normal operation of Advanced LIGO, additional technical noises dominate over these fundamental noise sources at low frequencies (≲10 Hz for the Advanced LIGO design) [10], but we have omitted these for simplicity.
+Our conclusion that the effect of feedback is inconsequential for metrology (as in GW detectors) should not be confused with a statement on feedback-based quantum state preparation in general. For example, feedback can be used, given certain conditions on the measurement sensitivity, to trap and cool the motion of test masses, as is done to the pendulum mode in Ref. [31]. For the purposes of metrology, however, such an exercise will suppress the signal (i.e. the force originating from GWs, f̂gw) and offer no improvement in signal-to-noise ratio.
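The claim that feedback drops out of the calibrated measurement can be checked with a small frequency-domain model. In the sketch below the feedback filter (simple velocity damping with gain g) is a hypothetical stand-in and the 40 kg mass is assumed; only the identity x̂0 = x̂cl(1 − χxx χxx,fb⁻¹) = χxx(f̂x + f̂gw) is being exercised.

```python
import numpy as np

# Structurally damped pendulum susceptibility, eq. (9); 40 kg mass assumed for illustration.
m, Om_m, Q = 40.0, 2 * np.pi * 0.4, 1e8
Omega = 2 * np.pi * np.linspace(5.0, 500.0, 2000)
chi_xx = 1.0 / (m * (-Omega**2 + Om_m**2 - 1j * Om_m**2 / Q))

# Hypothetical feedback filter: viscous damping, i.e. chi_fb_inv = i*m*g*Omega (stand-in only).
g = 2 * np.pi * 1.0
chi_fb_inv = 1j * m * g * Omega

# Closed-loop susceptibility and the displacement observed with the loop closed.
chi_cl = chi_xx / (1.0 - chi_xx * chi_fb_inv)
rng = np.random.default_rng(0)
f_x = rng.standard_normal(Omega.size) + 1j * rng.standard_normal(Omega.size)  # stand-in force noise
f_gw = 1e-3 * np.ones(Omega.size)                                             # stand-in GW force
x_cl = chi_cl * (f_x + f_gw)

# Calibration: undo the loop to recover the free-running displacement x_0 = chi_xx*(f_x + f_gw).
x_0 = x_cl * (1.0 - chi_xx * chi_fb_inv)
assert np.allclose(x_0, chi_xx * (f_x + f_gw))  # the feedback has dropped out entirely
```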
+Conclusion.—Thermal and quantum noises place fundamental limits on the sensitivities achievable by GW detectors. In this letter, we expand on Braginsky's treatment of these noise sources [13] using the general fluctuation-dissipation theorem. Our approach allows a direct computation of mechanical quantum noise ("test-mass quantization noise") and optical thermal noise from the well-understood mechanical thermal noise and optical quantum noise, respectively. In doing so we settle the long-standing question of test-mass quantization noise in GW detectors: it is a broadband source of noise that cannot be neglected on the grounds of being limited to certain frequencies, but it lies many orders of magnitude below the sensitivity of any GW detector based on current technology.
+Acknowledgments.—The authors acknowledge the support of the National Science Foundation and the LIGO Laboratory. LIGO was constructed by the California Institute of Technology and Massachusetts Institute of Technology with funding from the National Science Foundation, and operates under Cooperative Agreement No. PHY-1764464. The authors additionally thank Lisa Barsotti, Joe Bentley, Farid Khalili, Mikhail Korobko, Sergey Vyatchanin, Bernard Whiting and Christopher Wipf for useful comments. This paper has LIGO Document Number LIGO-P2200369.
+∗ chris.whittle@ligo.org
+† vivishek@mit.edu
+[1] P. R. Saulson, Phys. Rev. D 42, 2437 (1990).
+[2] Y. Levin, Phys. Rev. D 57, 659 (1998).
+[3] C. M. Caves, Phys. Rev. D 23, 1693 (1981).
+[4] C. M. Caves and B. L. Schumaker, Phys. Rev. A 31, 3068 (1985).
+[5] A. Buonanno and Y. Chen, Phys. Rev. D 64, 042006 (2001).
+[6] H. J. Kimble, Y. Levin, A. B. Matsko, K. S. Thorne, and S. P. Vyatchanin, Phys. Rev. D 65, 022002 (2001).
+[7] T. Corbitt, Y. Chen, and N. Mavalvala, Phys. Rev. A 72, 013818 (2005).
+[8] H. Yu, L. McCuller, M. Tse, N. Kijbunchoo, L. Barsotti, and N. Mavalvala, Nature 583, 43 (2020).
+[9] F. Acernese et al. (Virgo Collaboration), Phys. Rev. Lett. 125, 131101 (2020).
+[10] A. Buikema, C. Cahillane, G. Mansell, C. Blair, et al., Phys. Rev. D 102, 062003 (2020).
+[11] M. Tse, H. Yu, N. Kijbunchoo, et al., Phys. Rev. Lett. 123, 231107 (2019).
+[12] F. Acernese et al. (Virgo Collaboration), Phys. Rev. Lett. 123, 231108 (2019).
+[13] V. B. Braginsky, M. L. Gorodetsky, F. Y. Khalili, A. B. Matsko, K. S. Thorne, and S. P. Vyatchanin, Phys. Rev. D 67, 082001 (2003).
+[14] H. Callen and T. Welton, Phys. Rev. 83, 34 (1951).
+[15] R. Kubo, Rep. Prog. Phys. 29, 255 (1966).
+[16] M. Lax, Rev. Mod. Phys. 32, 25 (1960).
+[17] J.-M. Courty and S. Reynaud, Phys. Rev. A 46, 2766 (1992).
+[18] A. A. Clerk, M. H. Devoret, S. M. Girvin, F. Marquardt, and R. J. Schoelkopf, Rev. Mod. Phys. 82, 1155 (2010).
+[19] H. Miao, N. D. Smith, and M. Evans, Phys. Rev. X 9, 011053 (2019).
+[20] F. Y. Khalili, H. Miao, H. Yang, A. H. Safavi-Naeini, O. Painter, and Y. Chen, Phys. Rev. A 86, 033840 (2012).
+[21] The LIGO Scientific Collaboration, Class. Quantum Grav. 32, 074001 (2015).
+[22] B. J. Meers and K. A. Strain, Phys. Rev. A 44, 4693 (1991).
+[23] A. Pace, M. Collett, and D. Walls, Phys. Rev. A 47, 3173 (1993).
+[24] R. X. Adhikari, K. Arai, A. Brooks, C. Wipf, O. Aguiar, P. Altin, B. Barr, L. Barsotti, R. Bassiri, A. Bell, et al., Class. Quantum Grav. 37, 165003 (2020).
+[25] T. Akutsu, M. Ando, K. Arai, Y. Arai, et al. (KAGRA Collaboration), Nature Astronomy 3, 35 (2019).
+[26] ET Steering Committee, et al., ET Public Document (2020).
+[27] S. Singh, L. De Lorenzo, I. Pikovski, and K. Schwab, New J. Phys. 19, 073023 (2017).
+[28] L. De Lorenzo and K. Schwab, J. Low Temp. Phys. 186, 233 (2017).
+[29] K. Komori, D. Ďurovčíková, and V. Sudhir, Phys. Rev. A 105, 043520 (2022).
+[30] J. Weber and J. A. Wheeler, Rev. Mod. Phys. 29, 509 (1957).
+[31] C. Whittle, E. D. Hall, S. Dwyer, N. Mavalvala, V. Sudhir, et al., Science 372, 1333 (2021).
Technology,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Cambridge,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' MA 02139 (Dated: January 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' 2023) Contemporary gravitational-wave detectors are fundamentally limited by thermal noise—due to dissipation in the mechanical elements of the test mass—and quantum noise—from the vacuum fluctuations of the optical field used to probe the test mass position.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Two other fundamental noises can in principle also limit sensitivity: test-mass quantization noise due to the zero-point fluctuation of its mechanical modes, and thermal excitation of the optical field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' We use the quantum fluctuation- dissipation theorem to unify all four noises.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' This unified picture shows precisely when test-mass quantization noise and optical thermal noise can be ignored.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Introduction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='—Fundamental constraints on the sensi- tivity of gravitational-wave (GW) detectors arise from classical and quantum fluctuations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' At present, each of these noises is modeled using different techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Ther- mal noise—due to the Brownian motion of the mechani- cal test mass, its suspension, and the mirror coating on the test mass—is derived from the (classical) fluctuation- dissipation theorem [1, 2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Quantum noise—due to the vacuum fluctuations in the phase and amplitude of the optical field used to measure the test mass position—is derived from quantum electrodynamics in the so-called “two-photon formalism” [3–7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' The sum of these noises limit the performance of today’s GW detectors: quan- tum fluctuations in the amplitude of the optical field drive the motion of the test mass in the ∼20-50 Hz range [8, 9], Brownian motion of the mirror coatings dom- inate in the ∼50-200 Hz [10], and quantum fluctuations in the phase of the optical field sets the sensitivity above 200 Hz [11, 12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' In principle there exist noises that are exactly com- plementary, i.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' quantum noise of the mechanical de- grees of freedom and thermal noise of the optical field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' The former is a consequence of quantizing the mechan- ical motion of the interferometer test masses and the zero-point fluctuations that manifest as a result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' In fact, Braginsky et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' studied the role of test-mass quantiza- tion noise [13], concluding that “test-mass quantization is irrelevant [.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' ] if one filters the output data appropri- ately”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' On the other hand, thermal fluctuations of the optical field—for example due to blackbody radiation— can contribute excess noise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' We show that the four fundamental noises described above—thermal and quantum noises of the mechanical and optical degrees of freedom—can all be treated uni- formly using the quantum extension of the fluctuation- dissipation theorem [14–19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' This perspective enables a simple treatment of the test-mass quantum noise that is independent of the detector topology, instead depending only on the relative thermal and optical quantum energy scales.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' In doing so, we extend the analysis of Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' [13] to incorporate mechanical losses and to allow for differing GW detector optical topologies or arbitrarily complex test mass suspensions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' We find that test-mass quanti- zation noise is negligible in principle—i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' independent of any “filtering”—so long as the mechanical degrees of freedom of the test mass resonate at acoustic frequencies (Ωm) and the detector is operated at a temperature T > ℏΩm/kB ≈ (5 × 10−11 K) · � Ωm 2π · 1 Hz � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Likewise, optical thermal noise at the carrier frequency ωo is negligible compared to its quantum noise as long as GW detectors are operated at a temperature T < hωo/kB ≈ (14 × 103 K) · � ωo 2π · 300 THz � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Quantum fluctuation-dissipation theorem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='—A system in thermal equilibrium at temperature T can be mod- eled by the coupling of its observables to a noisy force from the environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' In the simplest case of a single observable ˆx, this coupling can be described by an inter- action Hamiltonian ˆHint = −ˆx ˆfx, where ˆfx is the general- ized force conjugate to the system operator ˆx originating from the system’s quantum and thermal environmental fluctuations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' In the linear response regime, the quan- tum fluctuation-dissipation theorem (FDT) states that the (symmetrized double-sided) power spectral density of any system observable ˆy is [15, Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='2)] ¯Syy[ω] = ℏ coth � ℏω 2kBT � Im χyx[ω], (1) where χyx is the susceptibility of the observable ˆy to the generalized force ˆfx, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' ˆy[ω] = χyx[ω] ˆfx[ω].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Using the identity coth � α 2 � = 1 + 2(eα − 1)−1, we rewrite this as ¯Syy[ω] = ℏ (2nth[ω] + 1) Im χyx[ω], (2) where nth[ω] ≡ (eℏω/kBT − 1)−1 is the Bose-Einstein oc- cupation number.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' We define the quantum noise (QN) in ˆy to be its fluc- tuations at zero temperature: ¯SQN yy [Ω] ≡ lim T →0 ¯Syy[ω] = ℏ Im χyx[ω], (3) arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='00338v1 [astro-ph.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='IM] 1 Jan 2023 2 ˆx ˆE Mechanical frequencies Ωm, Hz∼kHz (“hot”, kBT ≫ ¯hΩm) Thermal noise ∝ kBT Ωm Zero-point fluctuations ∝ ¯h Power spectral density Optical frequencies ωo, ∼ 1014 Hz (“cold”, kBT ≪ ¯hωo) Carrier field Shot noise ∝ ¯h Optical thermal noise ∝ ¯h exp � − ¯hωo kBT � Mechanically-induced sidebands on carrier field Frequency FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' A qualitative depiction of noise terms arising from the general fluctuation-dissipation theorem, coupling into each of the mechanical modes ˆx and optical modes ˆE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' At low frequencies—up to ∼kHz—contributions to the mechanical motion of the test masses are plotted as power spectra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' At these frequencies, thermal noise (red) dominates over the zero-point fluctuations of the test masses (purple).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' At high frequencies—in the ∼THz range—the optical power spectrum is shown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Here, the optical vacuum fluctuations (purple) are the relevant effect;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' the thermal occupation of optical modes (red) is exponentially suppressed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' For noise each curve, the relevant prefactors to the susceptibilities Im χ [from eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (3);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' eqs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (4) and (6)] are also shown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Gray shows the optical sidebands due to mechanical motion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' also called the zero-point fluctuations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' The thermal noise (TN) is then the remaining T- dependent term in eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (2), ¯STN yy [ω] ≡ ℏ · 2nth[ω] Im χyx[ω].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (4) Indeed, in the regime where the thermal energy dom- inates (kBT ≫ ℏω), we have nth ≈ kBT/ℏω ≫ 1 and recover the classical FDT result, ¯Syy ≈ ¯STN yy ≈ (2kBT/ω) Im χyx.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' The power of the FDT is that mere knowledge of the susceptibility—an object accessible to classical experimenters—dictates all fundamental (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' quantum and thermal) noises of interest.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Even further, it implies that the thermal and quantum noises are directly related to each other as ¯STN yy [ω] = 2nth[ω] ¯SQN yy [ω].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (5) Thus, one can be bootstrapped from the other, even with- out direct knowledge of the susceptibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' For a system in either the “cold” or “hot” regime, we can approximate the occupation number as nth[ω] ≈ � � � e−ℏω/kBT , kBT ≪ ℏω (“cold”), kBT ℏω , kBT ≫ ℏω (“hot”), (6) in eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (5) to relate the known quantum noise to the ther- mal noise and vice versa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' In contemporary GW detec- tors, the mechanical and optical modes are respectively in the hot and cold regimes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Thus, the known TN in the mechanical degrees of freedom—calculated indepen- dently using the classical FDT [1, 2]—can be used to estimate the mechanical QN: ¯SQN,mech yy [ω] ≈ ℏω 2kBT ¯STN,mech yy [ω].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (7) Similarly, the known QN in the optical field—calculated independently, say from input-output relations [5, 6]— can be used to estimate the optical TN: ¯STN,opt yy [ω] ≈ 2e−ℏω/kBT · ¯SQN,opt yy [ω].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (8) Figure 1 qualitatively shows the well-understood mechan- ical TN and optical QN, as well as the bootstrapped me- chanical quantum and optical thermal noises.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' In the fol- lowing we discuss the specifics of each of the mechanical and optical degrees of freedom in GW detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Test-mass quantization.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='—The test masses in GW de- tectors are engineered to be acoustic frequency mechan- ical oscillators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Their simplest description is through a lumped element model of a mechanical force ˆfx—by definition conjugate to the displacement ˆx—driving the displacement, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' ˆx[Ω] = χxx[Ω] ˆfx[Ω].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Given the test mass pendulum mode is structurally damped, the damp- ing rate is Γm[Ω] = Ω2 m/ΩQ, where Ωm is the mechanical resonance frequency and Q the mode quality factor [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' The pendulum mode susceptibility is then χ−1 xx [Ω] = m(−Ω2 + Ω2 m − iΩ2 m/Q).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (9) 3 It is precisely the interaction of the test mass oscilla- tor with its environment—and the concomitant spread- ing of its susceptibility in frequency—that was assumed to be negligible in the analysis of Braginsky et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Accounting for it consistently using the quantum FDT shows that the zero-point motion of the oscillator is the mechanical quantum noise: ¯SQN xx [Ω] = ℏ m · Ω2 m/Q (Ω2 − Ω2m)2 + (Ω2m/Q)2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (10) Even in this simple model, the quantum noise of the test mass is a broadband displacement noise [18, 20] that can- not, prima facie, be “filtered” out as asserted by Bragin- sky et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' In Advanced LIGO for example [21], be- cause of the low frequency and low loss of the test mass’s pendulum mode (Ωm ≈ 2π · 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='4 Hz and Q ≈ 108), this model predicts the off-resonant test-mass quantum noise: � ¯SQN xx [Ω ≫ Ωm] ≈ 10−25 m/ √ Hz · � Ω 2π · 10 Hz �−2 , six orders of magnitude smaller than the thermal noise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' In reality, the test masses (and their suspensions) are not lumped elements.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' They are vibrating elastic con- tinua;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' further, in interferometric GW detectors, test masses have mirror coatings which have their own elas- tic fluctuations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Although a lumped element treatment of the susceptibility is not possible in this case, suscepti- bilities that describe the thermal noise can nevertheless be derived [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' This is then precisely where our earlier observations are helpful.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Since the relevant frequencies Ω ≈ 2π·(0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='1−103) Hz and the operating temperature sat- isfies T ≫ ℏΩ/kB, these modes are in the “hot” regime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Thus, knowledge of the thermal noise allows a direct and accurate prediction of the broadband mechanical quan- tum noise using eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (7).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' The dashed purple line in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' 2 shows the broadband mechanical quantum noise in Ad- vanced LIGO bootstrapped from the well-modeled me- chanical thermal noise (red solid line).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Note that the broadband mechanical quantum noise is relatively white, in contrast to the 1/Ω falloff of the thermal noise am- plitude spectral density, a consequence of the frequency prefactor in eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (7).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' The dashed orange line is the pre- diction from a lumped element model of the pendulum mode alone [eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (10)].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Clearly, the displacement quan- tum noise is broadband, but negligible compared to the corresponding thermal noise—a fact that is contingent on the operating temperature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Optical thermal noise.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='—Information about the motion of the test masses is imprinted onto electromagnetic fields that propagate through the GW detector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Typically, the incident field has a carrier at frequency ωo, while all rele- vant information is contained in field fluctuations at fre- quency offsets Ω around the carrier that are small com- pared to ωo (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' |Ω| ≪ ωo).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Thus we are interested in ¯SEE[ωo + Ω], which is given by the quantum FDT ¯SEE[ωo + Ω] = ℏ (2nth[ω0 + Ω] + 1) Im χEE[ω0 + Ω].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' 100 101 102 103 Frequency [Hz] 10−28 10−25 10−22 10−19 10−16 Displacement ASD [m/ √ Hz] Total Optical quantum Test-mass thermal Pendulum mode quant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Test-mass quant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Optical thermal FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Gray shows the design sensitivity of Advanced LIGO, which is dominated by mechanical thermal noise (red solid) up to ∼200 Hz and by optical quantum noise (purple solid) above that frequency [21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Orange dashed is the pre- dicted mechanical quantum noise in a simplified model of the test-mass pendulum [eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (10)], while purple dashed shows the prediction [eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (7)] for the full mechanical degree of freedom.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' The difference in shape between the test-mass quantum and thermal noises is a result of the frequency-dependent factor in eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (7).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Red dashed is the optical thermal noise predicted from the known optical quantum noise using eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (11).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' The optical field in current interferometric GWs are in the “cold” regime with respect to the optical carrier and “hot” with respect to the offset frequency Ω, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' ℏΩ ≪ kBT ≪ ℏωo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Thus field fluctuations around the carrier are quantified by 1 + 2nth[ωo + Ω] ≈ 1 + 2e−ℏωo/kBT � 1 + ℏΩ kBT · � , which consists of a dominant quantum noise term [19, 22, 23] with an exponentially small thermal noise contri- bution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Thus the thermal noise around the electric field carrier is related to the quantum noise by ¯STN EE[ωo + Ω] ≈ 2e−ℏωo/kBT � 1 + ℏΩ kBT � ¯SQN EE[ωo + Ω].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (11) In Advanced LIGO, the quantum noise contribution of the optical field fluctuations is well-characterized (see purple solid line in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' 2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' Applying eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' (11) allows a direct extrapolation of the optical thermal noise (red dashed line in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' 2), which is shown to be negligible, even when compared to the already small mechanical quantum noise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' 4 Some designs for future upgrades to detectors and next-generation installations employ cryogenic technolo- gies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' However, the temperature will only be reduced by an order of magnitude or two [24–26] and the implications of fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' 2 will be unchanged.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/HNAyT4oBgHgl3EQffPjW/content/2301.00338v1.pdf'} +page_content=' A contrasting example can be found in superfluid helium-4 Weber bar antennae, which represent an altogether different technology that breaks into a new operational regime.' 
Here, the (tunable) mechanical resonance of the superfluid at ∼1 kHz couples to a microwave cavity resonant at 10.6 GHz [27, 28]. The microwave readout circuit spans temperatures from 50 mK to room temperature. For temperatures above 0.5 K within this circuit, the microwave modes enter the hot regime and their thermal noise becomes significant compared to quantum fluctuations.

Opto-/electro-mechanical interactions.—Figure 2 shows the noises that contribute to the free-running displacement $\hat{x}_0$ of the test masses, where the effect of optical and electrical feedback has been removed by the calibration process. Here, we explain why the specifics of such feedback do not affect our analysis as far as metrology is concerned. The feedback force on the oscillator can be written as $\hat{f}_{\rm fb}[\Omega] = \chi_{xx,{\rm fb}}^{-1}[\Omega]\,\hat{x}[\Omega]$, where $\chi_{xx,{\rm fb}}^{-1}$ is the displacement-to-force open-loop transfer function, and we have omitted the noise component of the feedback since it has no effect on calibration. This feedback may be electro-optic control loops or direct optical feedback from detuned interaction [29]. The displacement fluctuations due to the FDT [eq. (2)] can equivalently be written as force fluctuations $\hat{f}_x$, with spectrum
$\bar{S}_{f_x f_x}[\Omega] = \hbar\,(2 n_{\rm th}[\Omega] + 1)\,{\rm Im}\!\left(-\chi_{yx}^{-1}[\Omega]\right)$,  (12)
that sum with the feedback force $\hat{f}_{\rm fb}$. As a result of the feedback, the test-mass susceptibility is modified from its intrinsic form $\chi_{xx}$ [see eq. (9)] to an effective (i.e. closed-loop) susceptibility, $\chi_{xx,{\rm cl}} \equiv \chi_{xx}/(1 - \chi_{xx}\chi_{xx,{\rm fb}}^{-1})$. The displacement observed with the loop closed is then $\hat{x}_{\rm cl} \equiv \chi_{xx,{\rm cl}}\,\hat{f}_x$. This can be extended to also include the GW signal, which couples to the test-mass displacement via a force $\hat{f}_{\rm gw}$ [30]. The free-running displacement is then inferred as $\hat{x}_0 = \hat{x}_{\rm cl}\left(1 - \chi_{xx}\chi_{xx,{\rm fb}}^{-1}\right) = \chi_{xx}\left(\hat{f}_x + \hat{f}_{\rm gw}\right)$, where we drop the frequency dependence of each term for brevity. By convention, the spectrum $\bar{S}_{ff}$ of the noise $\hat{f}_x$ is calibrated to a displacement spectrum and then plotted as in fig. 2. Since the effect of the feedback is common to all forces, our ability to measure $\hat{f}_{\rm gw}$ depends only on the force noise $\hat{f}_x$ and not on the behavior of the feedback system. In the normal operation of Advanced LIGO, additional technical noises dominate over these fundamental noise sources at low frequencies (≲ 10 Hz for the Advanced LIGO design) [10], but we have omitted these for simplicity.

Our conclusion that the effect of feedback is inconsequential for metrology (as in GW detectors) should not be confused with a statement on feedback-based quantum state preparation in general. For example, feedback can be used, given certain conditions on the measurement sensitivity, to trap and cool the motion of test masses, as is done to the pendulum mode in Ref. [31]. For the purposes of metrology, however, such an exercise will suppress the signal (i.e. the force originating from GWs, $\hat{f}_{\rm gw}$) and offer no improvement in signal-to-noise ratio.
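The statement that the calibration step removes the feedback entirely follows from the algebra above, and can be checked numerically. The sketch below uses a hypothetical oscillator susceptibility and an arbitrary, made-up feedback filter; it is an illustration of the identity $\hat{x}_0 = \hat{x}_{\rm cl}(1-\chi_{xx}\chi_{xx,{\rm fb}}^{-1}) = \chi_{xx}(\hat{f}_x + \hat{f}_{\rm gw})$, not a model of a real detector.

```python
import numpy as np

# Frequency grid (Hz)
f = np.linspace(5, 500, 1000)
w = 2 * np.pi * f

# Hypothetical intrinsic test-mass susceptibility: a damped harmonic oscillator
m, w0, Q = 40.0, 2 * np.pi * 1.0, 1e6
chi_xx = 1.0 / (m * (w0**2 - w**2 + 1j * w0 * w / Q))

# Arbitrary open-loop feedback transfer function (force per displacement)
chi_fb_inv = -1e3 * (1 + 1j * w / (2 * np.pi * 50.0))

# Some made-up force noise plus a GW-equivalent force
rng = np.random.default_rng(0)
fx = rng.normal(size=f.size) + 1j * rng.normal(size=f.size)
fgw = 1e-3 * np.exp(-((f - 100.0) / 5.0) ** 2)

# Closed-loop susceptibility and the displacement observed with the loop closed
chi_cl = chi_xx / (1 - chi_xx * chi_fb_inv)
x_cl = chi_cl * (fx + fgw)

# Calibration step: undo the loop to infer the free-running displacement
x0 = x_cl * (1 - chi_xx * chi_fb_inv)

# The inferred displacement equals chi_xx * (fx + fgw), independent of the
# feedback filter chosen above.
assert np.allclose(x0, chi_xx * (fx + fgw))
```

Whatever filter is substituted for `chi_fb_inv`, the assertion holds, which is the sense in which the feedback is common to all forces and drops out of the metrology.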
Conclusion.—Thermal and quantum noises place fundamental limits on sensitivities achievable by GW detectors. In this letter, we expand on Braginsky's treatment of these noise sources [13] using the general fluctuation-dissipation theorem. Our approach allows a direct computation of mechanical quantum noise ("test-mass quantization noise") and optical thermal noise from the well-understood mechanical thermal noise and optical quantum noise respectively. In doing so we settle the long-standing question of test-mass quantization noise in GW detectors: it is a broadband source of noise that cannot be neglected on the grounds of being limited to certain frequencies, but it lies many orders of magnitude below the sensitivity of any GW detector based on current technology.

Acknowledgments.—The authors acknowledge the support of the National Science Foundation and the LIGO Laboratory. LIGO was constructed by the California Institute of Technology and Massachusetts Institute of Technology with funding from the National Science Foundation, and operates under Cooperative Agreement No. PHY-1764464. The authors additionally thank Lisa Barsotti, Joe Bentley, Farid Khalili, Mikhail Korobko, Sergey Vyatchanin, Bernard Whiting and Christopher Wipf for useful comments. This paper has LIGO Document Number LIGO-P2200369.

∗ chris.whittle@ligo.org
† vivishek@mit.edu

[1] P. R. Saulson, Phys. Rev. D 42, 2437 (1990).
[2] Y. Levin, Phys. Rev. D 57, 659 (1998).
[3] C. M. Caves, Phys. Rev. D 23, 1693 (1981).
[4] C. M. Caves and B. L. Schumaker, Phys. Rev. A 31, 3068 (1985).
[5] A. Buonanno and Y. Chen, Phys. Rev. D 64, 042006 (2001).
[6] H. J. Kimble, Y. Levin, A. B. Matsko, K. S. Thorne, and S. P. Vyatchanin, Phys. Rev. D 65, 022002 (2001).
[7] T. Corbitt, Y. Chen, and N. Mavalvala, Phys. Rev. A 72, 013818 (2005).
[8] H. Yu, L. McCuller, M. Tse, N. Kijbunchoo, L. Barsotti, and N. Mavalvala, Nature 583, 43 (2020).
[9] F. Acernese et al. (The Virgo Collaboration), Phys. Rev. Lett. 125, 131101 (2020).
[10] A. Buikema, C. Cahillane, G. Mansell, C. Blair, et al., Phys. Rev. D 102, 062003 (2020).
[11] M. Tse, H. Yu, N. Kijbunchoo, et al., Phys. Rev. Lett. 123, 231107 (2019).
[12] F. Acernese et al. (Virgo Collaboration), Phys. Rev. Lett. 123, 231108 (2019).
[13] V. B. Braginsky, M. L. Gorodetsky, F. Y. Khalili, A. B. Matsko, K. S. Thorne, and S. P. Vyatchanin, Phys. Rev. D 67, 082001 (2003).
[14] H. Callen and T. Welton, Phys. Rev. 83, 34 (1951).
[15] R. Kubo, Rep. Prog. Phys. 29, 255 (1966).
[16] M. Lax, Rev. Mod. Phys. 32, 25 (1960).
[17] J.-M. Courty and S. Reynaud, Phys. Rev. A 46, 2766 (1992).
[18] A. A. Clerk, M. H. Devoret, S. M. Girvin, F. Marquardt, and R. J. Schoelkopf, Rev. Mod. Phys. 82, 1155 (2010).
[19] H. Miao, N. D. Smith, and M. Evans, Phys. Rev. X 9, 011053 (2019).
[20] F. Y. Khalili, H. Miao, H. Yang, A. H. Safavi-Naeini, O. Painter, and Y. Chen, Phys. Rev. A 86, 033840 (2012).
[21] The LIGO Scientific Collaboration, Class. Quantum Grav. 32, 074001 (2015).
[22] B. J. Meers and K. A. Strain, Phys. Rev. A 44, 4693 (1991).
[23] A. Pace, M. Collett, and D. Walls, Phys. Rev. A 47, 3173 (1993).
[24] R. X. Adhikari, K. Arai, A. Brooks, C. Wipf, O. Aguiar, P. Altin, B. Barr, L. Barsotti, R. Bassiri, A. Bell, et al., Class. Quantum Grav. 37, 165003 (2020).
[25] T. Akutsu, M. Ando, K. Arai, Y. Arai, et al. (KAGRA Collaboration), Nat. Astron. 3, 35 (2019).
[26] ET Steering Committee et al., ET Public Document (2020).
[27] S. Singh, L. De Lorenzo, I. Pikovski, and K. Schwab, New J. Phys. 19, 073023 (2017).
[28] L. De Lorenzo and K. Schwab, J. Low Temp. Phys. 186, 233 (2017).
[29] K. Komori, D. Ďurovčíková, and V. Sudhir, Phys. Rev. A 105, 043520 (2022).
[30] J. Weber and J. A. Wheeler, Rev. Mod. Phys. 29, 509 (1957).
[31] C. Whittle, E. D. Hall, S. Dwyer, N. Mavalvala, V. Sudhir, et al., Science 372, 1333 (2021).
diff --git a/HdFLT4oBgHgl3EQfIC9G/vector_store/index.pkl b/HdFLT4oBgHgl3EQfIC9G/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9aa2db4e8a3aca7dc027ffc0a900843cab1ef0db --- /dev/null +++ b/HdFLT4oBgHgl3EQfIC9G/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:046fc901c9939828f34d32ebe1f7af81347d92bdc32cdf2b5456614a8fd1ca9d +size 175615
diff --git a/ItAzT4oBgHgl3EQfjv0x/vector_store/index.pkl b/ItAzT4oBgHgl3EQfjv0x/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9ac712dc16c27d577ea6528a5bd11309b09ce2a3 --- /dev/null +++ b/ItAzT4oBgHgl3EQfjv0x/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:793fdf8cca8f43d4b9a87bf167ebb255bbbe2be9f33527a66d3eae511fe563b8 +size 67053
diff --git a/ItFJT4oBgHgl3EQfvi05/content/tmp_files/2301.11626v1.pdf.txt b/ItFJT4oBgHgl3EQfvi05/content/tmp_files/2301.11626v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..bfd386bd406c1f311e6e3a9ab44b244c4db399f1 --- /dev/null +++ b/ItFJT4oBgHgl3EQfvi05/content/tmp_files/2301.11626v1.pdf.txt @@ -0,0 +1,636 @@

Electric field control of RKKY coupling through solid-state ionics
Maria Ameziane, Roy Rosenkamp, Lukáš Flajšman, Sebastiaan van Dijken, and Rhodri Mansell a)
NanoSpin, Department of Applied Physics, Aalto University School of Science, P.O. Box 15100, FI-00076 Aalto, Finland

Placing a suitable spacer layer between two magnetic layers can lead to an interaction between the magnetic layers known as Ruderman–Kittel–Kasuya–Yosida (RKKY) coupling. Controlling RKKY coupling, particularly the ability to switch between ferromagnetic and antiferromagnetic coupling, would enable novel magnetic data storage devices. By combining solid-state Li ion battery technology with an out-of-plane magnetized Co/Pt-based stack coupled through a Ru interlayer, we investigate the effects of the insertion of Li ions on the magnetic properties of the stack. The RKKY coupling and its voltage dependence are measured as a function of the Ru interlayer thickness, along with the effects of repeated voltage cycling. The Li ions change both the amplitude of the RKKY coupling and its phase, leading to the ability to switch the RKKY coupling between ferromagnetic and antiferromagnetic with applied voltages.

The ability to control magnetism through applied voltages opens a path to low-energy magnetic data storage devices [1-3]. Among the various approaches to using voltages to control magnetism, magneto-ionics has recently seen increased interest due to the large effects obtainable with this approach [4-6]. The insertion of non-magnetic ions into magnetic layers has been shown to change important magnetic properties such as the saturation magnetization [7-9], magnetic anisotropy [8-10], Dzyaloshinskii-Moriya interaction [11, 12] as well as exchange bias [13] and ferrimagnetic order [14]. However, for applications in digital memory and logic it is preferable not to change the magnitude of a magnetic property but to cause 180° switching. This is not straightforwardly achieved with electric fields, which lack the time-symmetry breaking property of magnetic fields [14]. Whilst pulse switching is possible [15], this requires finely tuned magnetic parameters.
One path is to use two magnetic layers coupled through RKKY interactions [16-20]. The coupling derives from spin-dependent reflection of the electron wavefunction at the normal metal / magnetic metal interface. This leads to a coupling that oscillates between antiferromagnetic and ferromagnetic coupling as a function of the thickness of the normal metal spacer layer, which is characterized by the wavelength and phase of the oscillation as well as the decay length of the envelope of the oscillation [16, 18].

RKKY coupling is sensitive to changes to the system, as it depends on the electrons at the Fermi surface of the interlayer [18]. This means the coupling can be modified for instance by doping the interlayer [21], modifying the capping layer [22], or changing the band filling in the spacer layer [23]. Control of RKKY coupling is a promising target for devices that aim to control magnetism through electric fields. As well as theoretical proposals for devices [24], control has been demonstrated in several experimental systems based on liquid ion gating [25] and voltage-induced switching in magnetic tunnel junctions [26, 27]. However, the switching of tunnel junctions still involves significant current densities. Switching of magnetic layers through voltage control of RKKY interactions has been achieved in Co-based perpendicularly magnetized layers using the insertion of hydrogen ions [20].

a) Electronic mail: rhodri.mansell@aalto.fi

Here we investigate the electric field control of RKKY coupling using a solid-state Li ion based device incorporating a Li storage layer, lithium cobalt oxide (LCO), and an ionic conductor, lithium phosphorus oxynitride (LiPON). By using technology taken from the field of solid-state Li ion batteries a large density of Li ions can be provided at low voltages [10]. Perpendicularly magnetized Co/Pt layers are used for the fixed and free layers of the device, which are coupled through a wedged Ru layer. Applying a positive voltage to the top electrode of a junction causes Li ions to move from the storage layer through the ionic conductor to the top layers of the metallic stack [10].

FIG. 1. (a) Cross-sectional schematic of a junction. (b) Optical microscopy image of the device consisting of vertical bottom electrodes and a horizontal top electrode. The bottom electrodes are 100 µm across. The thickness of the Ru interlayer increases from left to right. (c) Major hysteresis loop (black) and minor loop (orange) of the junction with 1.0 nm Ru at 0 V showing antiferromagnetic coupling between the free and fixed magnetic layer. (d) Major hysteresis loop (black) and minor loop (orange) of the junction with 2.4 nm Ru at 0 V showing ferromagnetic coupling.

In Fig. 1(a) we show a cross-sectional schematic of a junction. The total structure consists of a metal bottom electrode which contains all of the magnetic layers. The metallic stack is Ta (2 nm) / Pt (4 nm) / [Co (1 nm) / Pt (1 nm)]×4 / Co (1 nm) / Ru wedge / Pt (0.25 nm) / Co (0.4 nm) / Pt (0.25 nm) / Ti (1.5 nm). The bottom Co/Pt multilayer below the Ru wedge acts as the fixed layer of the device, with the top Co single layer acting as the free layer.
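The interlayer coupling measured later in the paper is quoted as an effective bias field acting on the free layer. One common way to convert an areal coupling energy J into such a field is μ0·H_ex = J / (Ms·t); the sketch below applies this relation with purely illustrative numbers (the values of J, Ms and the free-layer thickness are assumptions, not taken from the text).

```python
def rkky_bias_field_mT(J_mJ_per_m2, Ms_kA_per_m, t_nm):
    """Effective RKKY bias field mu0*H (in mT) acting on a free layer of
    thickness t and saturation magnetization Ms, for an areal interlayer
    exchange energy J, using mu0*H = J / (Ms * t)."""
    J = J_mJ_per_m2 * 1e-3    # J/m^2
    Ms = Ms_kA_per_m * 1e3    # A/m
    t = t_nm * 1e-9           # m
    return 1e3 * J / (Ms * t)

# Purely illustrative numbers: a 0.4 nm Co free layer with Ms ~ 1400 kA/m and
# J ~ 0.02 mJ/m^2 gives a bias of a few tens of mT, the same order of magnitude
# as the minor-loop shifts reported later.
print(rkky_bias_field_mT(0.02, 1400, 0.4))   # ~36 mT
```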
The 0.25 nm Pt layers around the top Co layer lead to perpendicular magnetization while still preserving the RKKY coupling [28]. The upper layers form the Li ion conduction and storage part of the device and consist of LiPON (70 nm) / LCO (20 nm) / Pt (5 nm). The bottom electrode, consisting of the metallic layers capped by Ti, is patterned by optical lithography and lift-off into 100 µm wide stripes, where the length of the stripes is orthogonal to the direction of the Ru wedge. A second lithography step is used to create an insulating SiN layer with windows. The top electrode then creates cross junctions with the metallic bottom electrode through the windows in the SiN layer. The SiN acts to reduce shorting at the edges of the junctions. The Ru wedge thickness is estimated from a calibration sample grown with the same wedge parameters as the device. The fabrication process results in an array of crossbar junctions, each with a different average thickness of the Ru interlayer. Figure 1(b) shows an optical microscopy image of the device structure. The change of the Ru thickness within each junction is of the order of 0.3 Å and the thickness increases from left to right. The bottom electrode is grounded and voltages are applied to the top electrode using a Keithley sourcemeter.

To demonstrate the magnetic behavior of the junctions we show in Fig. 1(c) the junction with a Ru thickness of 1.9 nm. The major hysteresis loop (black) shows two switches coming from negative saturation: firstly the smaller switch of the top Co single layer before 0 mT, followed by the switch of the Co/Pt multilayer at around +80 mT. The thin Pt layers lead to exchange coupling between the bottom five Co layers so that they switch as an effective single layer. The top Co layer is RKKY coupled to the bottom layers. This leads to an effective bias field on the layer, which itself depends on the thickness of the Ru interlayer. Here, the top layer minor loop (orange) shows antiferromagnetic coupling. Coming from negative saturation the top layer switches already at negative fields, so that the top layer is aligned antiparallel to the bottom layers at zero magnetic field. In Fig. 1(d), at a different point on the wedge with 2.4 nm of Ru, we measure a minor loop that corresponds to ferromagnetic coupling. Here the minor loop is shifted to positive applied fields, showing that the RKKY coupling favors parallel alignment of the layers. The RKKY coupling also affects the coercive field of the bottom layers, seen in their reduced coercivity in Fig. 1(d) compared to Fig. 1(c), but the effect is small due to the greater combined moment of the bottom layers.
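The offset and width of a minor loop such as those in Fig. 1(c)-(d) encode the coupling field and the free-layer coercivity. A minimal sketch of that bookkeeping is given below; the switching-field values are invented for illustration.

```python
def minor_loop_analysis(H_switch_up_mT, H_switch_down_mT):
    """From the two switching fields of the free-layer minor loop, return the
    coercivity, the loop offset, and the qualitative coupling sign.
    A positive offset means the loop is centred at positive field, i.e.
    ferromagnetic coupling to the fixed layer; a negative offset means
    antiferromagnetic coupling."""
    offset = 0.5 * (H_switch_up_mT + H_switch_down_mT)
    coercivity = 0.5 * abs(H_switch_up_mT - H_switch_down_mT)
    sign = "ferromagnetic" if offset > 0 else "antiferromagnetic"
    return coercivity, offset, sign

# Hypothetical switching fields (mT) for an AF-coupled junction:
print(minor_loop_analysis(-20.0, -44.0))   # (12.0, -32.0, 'antiferromagnetic')
# and for an FM-coupled junction:
print(minor_loop_analysis(44.0, 20.0))     # (12.0, 32.0, 'ferromagnetic')
```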
FIG. 2. (a) Cyclic voltammogram of the junction with 1.9 nm Ru interlayer taken at 50 mV/s. (b) Current flow (left axis) through the same junction as (a) driven by ± 2 V toggle switching (right axis). (c) Hysteresis loops of the junction with 2.6 nm Ru recorded at three different applied voltages. (d) The RKKY coupling of the 2.6 nm Ru junction when cycling the voltage from −2.5 V to +2.5 V and back to −2.5 V. At each voltage a minor loop is taken to determine the RKKY coupling strength.

The junctions also have significant electrical properties, derived from their battery-like structure. The cyclic voltammogram of the junction with 1.9 nm Ru interlayer thickness is shown in Fig. 2(a). The measurement shows an asymmetric loop with notable redox peaks at around +1 V on the positive sweep and a broader peak at −0.5 V on the downward sweep. The asymmetric loop shown here is typical of an intercalation-dominated electrochemical process [29]. All the junctions measured show similar behavior. We also cycled the junctions with a stepped voltage as shown in Fig. 2(b). A stepped voltage is more technologically relevant than the slow sweep of the cyclic voltammetry. Stepping the voltage between −2 V and +2 V leads to peak currents larger by two orders of magnitude passing through the devices. These currents, however, also quickly decay with a time constant under a second, showing the relatively rapid ion movement possible with Li ion-based devices. In Fig. 2(c) we show how applied dc voltages affect the magnetic layers. The minor hysteresis loops from a junction with 2.6 nm Ru are shown; this junction shows ferromagnetic coupling (see also Fig. 1(d)). At negative voltages a relatively narrow loop is seen with an offset of around +32 mT, which remains at 0 V. At +2 V applied, which corresponds to the insertion of Li ions into the magnetic layers, the magnitude of the bias decreases to around 27 mT and the hysteresis loop broadens. In Fig. 2(d) we show the RKKY coupling strength of this junction as the voltage is cycled from −2.5 V to +2.5 V and back in steps of 0.5 V. The RKKY coupling shows hysteretic behavior, with the switching occurring around +1 V in the positive sweep direction and 0 V in the negative sweep direction, roughly consistent with the peaks seen in the cyclic voltammogram (Fig. 2(a)).

FIG. 3. (a) RKKY coupling strength as a function of Ru interlayer thickness for +2 V and −2 V. (b) Difference in RKKY coupling strength for +2 V and −2 V as a function of Ru interlayer thickness. (c) Coercivity of the junction with 1.85 nm Ru interlayer thickness at +2 V and −2 V as a function of voltage cycles. The cycling was carried out via ± 2 V switching as in Fig. 2(b). (d) RKKY coupling of the same junction as (c) at +2 V and −2 V as a function of voltage cycles (left axis). The voltage-induced change in RKKY as a function of voltage cycles (right axis).

Combined voltage-dependent RKKY coupling data for all the junctions measured is shown in Fig. 3(a) as a function of the Ru interlayer thickness. The RKKY coupling is shown for +2 V and −2 V, where the effect of voltage is small compared to the effect of the changing Ru thickness. Negative values are used to indicate antiferromagnetic coupling, with positive values corresponding to ferromagnetic coupling across the Ru interlayer. As a function of interlayer thickness a peak in antiferromagnetic RKKY coupling is seen around 1.2 nm Ru, followed by a ferromagnetic peak at around 2.1 nm. This is similar to what is expected from previously studied Co/Ru coupling systems, although the peak antiferromagnetic coupling is measured at slightly larger Ru thickness [16, 28]. In Fig. 3(b) the difference between the ± 2 V data is plotted.
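The thickness dependence described above can be pictured with a toy oscillatory coupling function. The sketch below is not a fit to the data; its period and phase are chosen by hand so that the antiferromagnetic and ferromagnetic extrema fall roughly at the 1.2 nm and 2.1 nm thicknesses quoted, and it simply illustrates how a small phase shift flips the sign of the coupling near a crossing.

```python
import numpy as np

def rkky_coupling(d_nm, J0=1.0, period_nm=1.8, phase=0.5):
    """Toy RKKY-like coupling versus spacer thickness d: an oscillation of fixed
    period and phase inside a 1/d^2 envelope. Positive values stand for
    ferromagnetic and negative for antiferromagnetic coupling. All parameters
    are illustrative, not fitted to the measurements."""
    d = np.asarray(d_nm, dtype=float)
    return J0 * np.sin(2 * np.pi * d / period_nm + phase) / d**2

# With these hand-picked parameters the AF and FM extrema of the sine term fall
# near 1.2 nm and 2.1 nm, similar to the peak positions quoted above.
print(np.round(rkky_coupling([1.2, 1.65, 2.1]), 3))   # [-0.694 -0.009  0.227]

# A small extra phase (standing in for the effect of inserted Li ions) barely
# changes the extrema but flips the sign close to an AF/FM crossing:
d_cross = 1.65
print(np.sign(rkky_coupling(d_cross)), np.sign(rkky_coupling(d_cross, phase=0.75)))
```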
Generally, the effect of positive voltages, which cause the insertion of Li ions into the magnetic layers, is to reduce the magnitude of the coupling for both antiferromagnetically coupled and ferromagnetically coupled junctions. However, there is a further effect. The symbols plotted in red show where the effect is reversed, that is, positive voltages cause an increase in the strength of RKKY coupling. This effect occurs around the crossings between antiferromagnetic and ferromagnetic coupling and indicates that as well as a change in strength there is also a change in the phase of the RKKY coupling caused by the insertion of Li ions.

In Fig. 3(c)-(d) we show the effect of extensive voltage cycling on the magnetic properties of the junctions. The junction with 1.85 nm Ru is cycled as in Fig. 2(b) using ± 2 V square pulses. In Fig. 3(c) the change of coercivity is shown as a function of the number of cycles, demonstrating a significant drop from an initial coercivity of 10 mT down to less than 1 mT after 2000 cycles. At the same time, as shown in Fig. 3(d), the RKKY coupling becomes more positive, that is, its magnitude increases for both −2 V and +2 V. This leads to an increase also in the size of the voltage effect on the RKKY coupling from around 1 mT initially to around 3 mT after extensive cycling. This effect is most likely caused by changes to the top Co single layer. The cycling of Li ions may disrupt the Co/Pt interfaces leading to lower anisotropy and lower effective thickness of the Co layer. The lower anisotropy is likely to lead to lower coercivity, whilst a decreased effective thickness of the Co layer will lead to a higher effective RKKY coupling field.

FIG. 4. (a) Changes in the coercivity and RKKY coupling of the junction with 1.75 nm Ru at −2 V and +2 V as a function of the number of voltage cycles. (b) Minor loops at −2 V and +2 V after 600 voltage cycles. The blue arrow shows the extent of all-electrical zero-field switching determined from minor loops taken after electrical switching.

The effects shown in Fig. 3 can be used to create zero magnetic field switching of magnetization under an applied voltage. In Fig. 4(a) we show the effect of cycling the junction with 1.75 nm Ru interlayer thickness, which is slightly ferromagnetically coupled before the application of voltages. The initial coercivity of the layer is around 10 mT and this is significantly larger than the effects of voltage on the RKKY coupling (∼ 2 mT). By cycling the junction the coercivity is reduced and the RKKY coupling at the different voltages shifts. After 600 cycles the coercivity has dropped below 2 mT, the RKKY coupling at +2 V has become positive, whilst the RKKY coupling at −2 V is still negative. Firstly, this demonstrates clearly the effect of the Li ions on the phase of the RKKY coupling, as the coupling can be switched from ferromagnetic to antiferromagnetic by the applied voltage. This is the necessary condition for creating devices based on voltage control of the RKKY coupling. Secondly, the shift of the RKKY coupling caused by the voltage is larger than the coercivity, and so it should be possible to switch the magnetization at zero magnetic field. In Fig. 4(b) the minor hysteresis loops after 1100 cycles are shown. Although the shift in the RKKY coupling is greater than the coercivity, the loops are slanted, which is consistent with a reduced anisotropy.
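The zero-field switching argument of the previous paragraph reduces to a comparison between the voltage-dependent RKKY bias field and the coercivity. A minimal sketch of that decision rule, with invented numbers, is given below.

```python
def zero_field_state(H_ex_mT, H_c_mT, current_state=+1):
    """Free-layer orientation at zero applied field (+1 parallel, -1 antiparallel
    to the fixed layer) for an effective RKKY bias field H_ex (positive =
    ferromagnetic coupling) and coercivity H_c. The layer only switches when the
    bias field exceeds the coercivity."""
    if H_ex_mT > H_c_mT:
        return +1               # FM coupling strong enough to force parallel alignment
    if H_ex_mT < -H_c_mT:
        return -1               # AF coupling strong enough to force antiparallel alignment
    return current_state        # otherwise the existing state is retained

# Illustrative numbers in the spirit of Fig. 4 (not measured values):
H_c = 1.5                                               # mT, coercivity after cycling
print(zero_field_state(+2.0, H_c, current_state=-1))    # positive bias: switches to +1
print(zero_field_state(-2.5, H_c, current_state=+1))    # negative bias: switches to -1
print(zero_field_state(+0.5, H_c, current_state=-1))    # bias below H_c: state retained
```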
From loops starting at zero applied field taken after electrical cycling, the extent of the all-electrical switching is shown by the blue arrow in Fig. 4(b) and is equal to around a third of the total magnetization.
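The switched fraction quoted here can be estimated directly from the zero-field MOKE level relative to the saturated levels of the minor loop, as in the short sketch below (the signal levels are hypothetical, in arbitrary units).

```python
def switched_fraction(m_after, m_down, m_up):
    """Fraction of the free-layer magnetization reversed by the voltage,
    estimated from the zero-field MOKE level m_after relative to the fully
    'down' (m_down) and fully 'up' (m_up) levels of the minor loop."""
    return (m_after - m_down) / (m_up - m_down)

# Hypothetical MOKE signal levels:
print(switched_fraction(m_after=-0.35, m_down=-1.0, m_up=1.0))   # ~0.33, about a third
```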
Wang, “Reversible +control of co magnetism by voltage-induced oxidation,” Physical +Review Letters 113, 267202 (2014). +8U. Bauer, L. Yao, A. J. Tan, P. Agrawal, S. Emori, H. L. Tuller, +S. van Dijken, and G. S. D. Beach, “Magneto-ionic control of +interfacial magnetism,” Nature Materials 14, 174–181 (2015). +9A. J. Tan, M. Huang, C. O. Avci, F. B¨uttner, M. Mann, W. Hu, +C. Mazzoli, S. Wilkins, H. L. Tuller, and G. S. D. Beach, +“Magneto-ionic control of magnetism using a solid-state proton +pump,” Nature Materials 18, 35–41 (2019). +10M. Ameziane, R. Mansell, V. Havu, P. Rinke, and S. van Dijken, +“Lithium-ion battery technology for voltage control of perpendic- +ular magnetization,” Advanced Functional Materials 32, 2113118 +(2022). +11T. Srivastava, M. Schott, R. Juge, V. Krizakova, M. Belmegue- +nai, Y. Roussign´e, A. Bernand-Mantel, L. Ranno, S. Pizzini, S.- +M. Ch´erif, A. Stashkevich, S. Auffret, O. Boulle, G. Gaudin, +M. Chshiev, C. Baraduc, and H. B´ea, “Large-voltage tun- +ing of Dzyaloshinskii–Moriya interactions: A route toward dy- +namic control of skyrmion chirality,” Nano Letters 18, 4871–4877 +(2018). +12L. Herrera Diez, Y. T. Liu, D. A. Gilbert, M. Belmeguenai, J. Vo- +gel, S. Pizzini, E. Martinez, A. Lamperti, J. B. Mohammedi, +A. Laborieux, +Y. Roussign´e, +A. J. Grutter, +E. Arenholtz, +P. Quarterman, B. Maranville, S. Ono, M. Salah El Hadri, R. Tol- +ley, E. E. Fullerton, L. Sanchez-Tejerina, A. Stashkevich, S. M. +Ch´erif, A. D. Kent, D. Querlioz, J. Langer, B. Ocker, and D. Rav- +elosona, “Nonvolatile ionic modification of the Dzyaloshinskii- +Moriya interaction.” Phys. Rev. Appl. 12, 034005 (2019). +13D. A. Gilbert, J. Olamit, R. K. Dumas, B. J. Kirby, A. J. Grut- +ter, B. B. Maranville, E. Arenholz, J. A. Borchers, and K. Liu, +“Controllable positive exchange bias via redox-driven oxygen mi- +gration,” Nature Communications 7, 11050 (2016). +14M. Huang, M. U. Hasan, K. Klyukin, D. Zhang, D. Lyu, P. Gar- +giani, M. Valvidares, S. Sheffels, A. Churikova, F. B¨uttner, +J. Zehner, L. Caretta, K.-Y. Lee, J. Chang, J.-P. Wang, K. Leist- +ner, B. Yildiz, and G. S. D. Beach, “Voltage control of ferrimag- +netic order and voltage-assisted writing of ferrimagnetic spin tex- +tures,” Nature Nanotechnology 16, 981–988 (2021). +15Y. Shiota, T. Nozaki, F. Bonell, S. Murakami, T. Shinjo, and +Y. Suzuki, “Induction of coherent magnetization switching in a +few atomic layers of FeCo using voltage pulses,” Nature Materials +11, 39–43 (2012). +16S. S. P. Parkin, N. More, and K. P. Roche, “Oscillations in ex- +change coupling and magnetoresistance in metallic superlattice +structures: Co/Ru, Co/Cr, and Fe/Cr,” Phys. Rev. Lett. 64, +2304–2307 (1990). +17S. S. P. Parkin and D. Mauri, “Spin engineering: Direct deter- +mination of the Ruderman-Kittel-Kasuya-Yosida far-field range +function in ruthenium,” Phys. Rev. B 44, 7131–7134 (1991). +18P. Bruno and C. Chappert, “Ruderman-Kittel theory of oscil- +latory interlayer exchange coupling,” Phys. Rev. B 46, 261–270 +(1992). +19R. A. Duine, K.-J. Lee, S. S. P. Parkin, and M. D. Stiles, +“Synthetic antiferromagnetic spintronics,” Nature Physics 14, +217–219 (2018). +20A. E. Kossak, M. Huang, P. Reddy, D. Wolf, and G. S. D. Beach, +“Voltage control of magnetic order in RKKY coupled multilay- +ers,” Science Advances 9, eadd054 (2023). +21P. Bruno, J. Kudrnovsk´y, V. Drchal, and I.Turek, “Interlayer +exchange coupling through ordered and disordered alloy spac- +ers,” Journal of Magnetism and Magnetic Materials 165, 128–133 +(1997). +22J. 
22. J. Kudrnovský, V. Drchal, P. Bruno, I. Turek, and P. Weinberger, “Interlayer exchange coupling: Effect of the cap,” Phys. Rev. B 56, 8919–8927 (1997).
23. A. O. Leon, J. d’Albuquerque e Castro, J. C. Retamal, A. B. Cahaya, and D. Altbir, “Manipulation of the RKKY exchange by voltages,” Phys. Rev. B 100, 014403 (2019).
24. C.-Y. You and S. Bader, “Bias-voltage-controlled interlayer exchange coupling,” IEEE Transactions on Magnetics 35, 2967–2969 (1999).
25. Q. Yang, L. Wang, Z. Zhou, L. Wang, Y. Zhang, S. Zhao, G. Dong, Y. Cheng, T. Min, Z. Hu, W. Chen, K. Xia, and M. Liu, “Ionic liquid gating control of RKKY interaction in FeCoB/Ru/FeCoB and (Pt/Co)2/Ru/(Co/Pt)2 multilayers,” Nature Communications 9, 991 (2018).
26. T. Newhouse-Illige, Y. Liu, M. Xu, D. R. Hickey, A. Kundu, H. Almasi, C. Bi, X. Wang, J. W. Freeland, D. J. Keavney, C. J. Sun, Y. H. Xu, M. Rosales, X. M. Cheng, S. Zhang, K. A. Mkhoyan, and W. G. Wang, “Voltage-controlled interlayer coupling in perpendicularly magnetized magnetic tunnel junctions,” Nature Communications 8, 15232 (2017).
27. D. Zhang, M. Bapna, W. Jiang, D. Sousa, Y.-C. Liao, Z. Zhao, Y. Lv, P. Sahu, D. Lyu, A. Naeemi, T. Low, S. A. Majetich, and J.-P. Wang, “Bipolar electric-field switching of perpendicular magnetic tunnel junctions through voltage-controlled exchange coupling,” Nano Letters 22, 622–629 (2022).
28. R. Lavrijsen, A. Fernández-Pacheco, D. Petit, R. Mansell, J. H. Lee, and R. P. Cowburn, “Tuning the interlayer exchange coupling between single perpendicularly magnetized CoFeB layers,” Appl. Phys. Lett. 100, 052411 (2012).
29. P. Simon and Y. Gogotsi, “Perspectives for electrochemical capacitors and related devices,” Nature Materials 19, 1151–1163 (2020).
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Martinez, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Lamperti, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Mohammedi, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Laborieux, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Roussign´e, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Grutter, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Arenholtz, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Quarterman, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Maranville, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Ono, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Salah El Hadri, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Tol- ley, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Fullerton, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Sanchez-Tejerina, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Stashkevich, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Ch´erif, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Kent, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Querlioz, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Langer, B.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Ocker, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Rav- elosona, “Nonvolatile ionic modification of the Dzyaloshinskii- Moriya interaction.” Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 12, 034005 (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 13D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Gilbert, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Olamit, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Dumas, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Kirby, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Grut- ter, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Maranville, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Arenholz, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Borchers, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Liu, “Controllable positive exchange bias via redox-driven oxygen mi- gration,” Nature Communications 7, 11050 (2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 14M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Huang, M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Hasan, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Klyukin, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Zhang, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Lyu, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Gar- giani, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Valvidares, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Sheffels, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Churikova, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' B¨uttner, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Zehner, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Caretta, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content='-Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Lee, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Chang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content='-P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Wang, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Leist- ner, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Yildiz, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Beach, “Voltage control of ferrimag- netic order and voltage-assisted writing of ferrimagnetic spin tex- tures,” Nature Nanotechnology 16, 981–988 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 15Y.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Shiota, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Nozaki, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Bonell, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Murakami, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Shinjo, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Suzuki, “Induction of coherent magnetization switching in a few atomic layers of FeCo using voltage pulses,” Nature Materials 11, 39–43 (2012).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 16S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Parkin, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' More, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Roche, “Oscillations in ex- change coupling and magnetoresistance in metallic superlattice structures: Co/Ru, Co/Cr, and Fe/Cr,” Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 64, 2304–2307 (1990).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 17S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Parkin and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Mauri, “Spin engineering: Direct deter- mination of the Ruderman-Kittel-Kasuya-Yosida far-field range function in ruthenium,” Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Rev.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' B 44, 7131–7134 (1991).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 18P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Bruno and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Chappert, “Ruderman-Kittel theory of oscil- latory interlayer exchange coupling,” Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' B 46, 261–270 (1992).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 19R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Duine, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content='-J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Lee, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Parkin, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Stiles, “Synthetic antiferromagnetic spintronics,” Nature Physics 14, 217–219 (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 20A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Kossak, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Huang, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Reddy, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Wolf, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Beach, “Voltage control of magnetic order in RKKY coupled multilay- ers,” Science Advances 9, eadd054 (2023).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 21P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Bruno, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Kudrnovsk´y, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Drchal, and I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content='Turek, “Interlayer exchange coupling through ordered and disordered alloy spac- ers,” Journal of Magnetism and Magnetic Materials 165, 128–133 (1997).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 22J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Kudrnovsk´y, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Drchal, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Bruno, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Turek, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Weinberger, “Interlayer exchange coupling: Effect of the cap,” Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' B 56, 8919–8927 (1997).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 23A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Leon, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' d’Albuquerque e Castro, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Retamal, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' B.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Cahaya, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Altbir, “Manipulation of the RKKY exchange by voltages,” Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' B 100, 014403 (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 24C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content='-Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' You and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Bader, “Bias-voltage-controlled interlayer ex- change coupling,” IEEE Transactions on Magnetics 35, 2967– 2969 (1999).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 25Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Yang, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Wang, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Zhou, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Wang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Zhang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Zhao, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Dong, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Cheng, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Min, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Hu, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Chen, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Xia, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Liu, “Ionic liquid gating control of RKKY interaction in Fe- CoB/Ru/FeCoB and (Pt/Co)2/Ru/(Co/Pt)2 multilayers,” Na- ture Communications 9, 991 (2018).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 26T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Newhouse-Illige, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Liu, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Xu, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Hickey, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Kundu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Almasi, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Bi, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Wang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Freeland, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Keavney, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Sun, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Xu, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Rosales, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Cheng, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Zhang, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Mkhoyan, and W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' G.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Wang, “Voltage-controlled interlayer cou- pling in perpendicularly magnetized magnetic tunnel junctions,” Nature Communications 8, 15232 (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 27D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Zhang, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Bapna, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Jiang, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Sousa, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content='-C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Liao, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Zhao, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Lv, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Sahu, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Lyu, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Naeemi, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Low, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Majetich, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content='-P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Wang, “Bipolar electric-field switching of perpendicular magnetic tunnel junctions through voltage-controlled exchange 5 coupling,” Nano Letters 22, 622–629 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' 28R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Lavrijsen, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Fern´andez-Pacheco, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ItFJT4oBgHgl3EQfvi05/content/2301.11626v1.pdf'} +page_content=' Petit, R.' 
A Constrained-Optimization Approach to the Execution of Prioritized Stacks of Learned Multi-Robot Tasks

Gennaro Notomista
Department of Electrical and Computer Engineering, University of Waterloo, Waterloo, ON, Canada
gennaro.notomista@uwaterloo.ca

arXiv:2301.05346v1 [cs.RO] 13 Jan 2023

Abstract. This paper presents a constrained-optimization formulation for the prioritized execution of learned robot tasks. The framework lends itself to the execution of tasks encoded by value functions, such as tasks learned using the reinforcement learning paradigm. The tasks are encoded as constraints of a convex optimization program by using control Lyapunov functions. Moreover, an additional constraint is enforced in order to specify relative priorities between the tasks. The proposed approach is showcased in simulation using a team of mobile robots executing coordinated multi-robot tasks.
Keywords: Multi-robot motion coordination, Distributed control and planning, Learning and adaptation in teams of robots

1 Introduction

Learning complex robotic tasks can be challenging for several reasons. The nature of compound tasks, made up of several simpler subtasks, renders it difficult to simultaneously capture and combine all features of the subtasks to be learned. Another limiting factor of the learning process of compound tasks is the computational complexity of the machine learning algorithms employed in the learning phase. This can make the training phase prohibitive, especially when the representation of the tasks comprises a large number of parameters, as is generally the case when dealing with complex tasks made up of several subtasks, or in the case of high-dimensional state-space representations.

For these reasons, when there is an effective way of combining the execution of multiple subtasks, it is useful to break down complex tasks into building blocks that can be independently learned in a more efficient fashion. Besides the reduced computational complexity stemming from the simpler nature of the subtasks to be learned, this approach has the benefit of increasing the modularity of the task execution framework, by allowing the subtasks to be reused as building blocks for the execution of different complex tasks. Discussions and analyses of such advantages can be found, for instance, in [26,9,32,16].

Along these lines, in [13], compositionality and incrementality are recognized to be two fundamental features of robot learning algorithms. Compositionality, in the context of learning to execute multiple tasks, is intended as the property of learning strategies to be in a form that allows them to be combined with previous knowledge. Incrementality guarantees the possibility of adding new knowledge and abilities over time, by, for instance, incorporating new tasks. Several approaches that exhibit these two properties have been proposed. Nevertheless, challenges still remain regarding task prioritization and stability guarantees [21,25,28,34,6]. The possibility of prioritizing tasks, together with the stability guarantees, allows us to characterize the behavior resulting from the composition of multiple tasks.

In fact, when dealing with redundant robotic systems, i.e. systems which possess more degrees of freedom than the minimum number required to execute a given task (as is the case, for example, for multi-robot systems), it is often useful to allow for the execution of multiple subtasks in a prioritized stack. Task priorities may allow robots to adapt to the different scenarios in which they are employed by exhibiting structurally different behaviors. Therefore, it is desirable that a multi-task execution framework allow for the prioritized execution of multiple tasks.

In this paper, we present a constrained-optimization robot-control framework suitable for the stable execution of multiple tasks in a prioritized fashion. This approach leverages the reinforcement learning (RL) paradigm to obtain an approximation of the value functions which will be used to encode the tasks as constraints of a convex quadratic program (QP). Owing to its convexity, the latter can be solved in polynomial time [3], and it is therefore suitable to be employed in a large variety of robotic applications, in online settings, even under real-time constraints.
The proposed framework shares its optimization-based nature with the one proposed in [18] for redundant robotic manipulators, where, however, it is assumed that a representation of all tasks to be executed is known a priori. As will be discussed later in the paper, the framework presented here indeed combines compositionality and incrementality, i.e. the abilities of combining and adding subtasks to build up compound tasks, respectively, with stable and prioritized task execution in a computationally efficient optimization-based algorithm.

Figure 1 pictorially shows the strategy adopted in this paper to allow robots to execute multiple prioritized tasks learned using the RL paradigm. Once a value function is learned using the RL paradigm (using, e.g., the value iteration algorithm [2]), the learned value function is used to construct a control Lyapunov function [30] in such a way that a controller synthesized using a min-norm optimization program is equivalent to the optimal policy corresponding to the value function [20]. Then, multiple tasks encoded by constraints in a min-norm controller are combined in a prioritized stack as in [17].

Fig. 1. Pictorial representation of the strategy adopted in this paper for the execution of prioritized stacks of learned tasks (optimal control, value functions [2], Lyapunov functions [20], prioritized execution [17]).

To summarize, the contributions of this paper are the following: (i) We present a compositional and incremental framework for the execution of multiple tasks encoded by value functions; (ii) We show how priorities among tasks can be enforced in a constrained-optimization-based formulation; (iii) We frame the prioritized multi-task execution as a convex QP which can be efficiently solved in online settings; (iv) We demonstrate how the proposed framework can be employed to control robot teams to execute coordinated tasks.

2 Background and Related Work

2.1 Multi-Task Learning, Composition, and Execution

The prioritized execution framework for learned tasks proposed in this paper can be related to approaches devised for multi-task learning, a machine learning paradigm which aims at leveraging useful information contained in multiple related tasks to help improve the generalization performance of all the tasks [35]. The learning of multiple tasks can happen in parallel (independently) or in sequence for naturally sequential tasks [10,29], and a number of computational frameworks have been proposed to learn multiple tasks (see, e.g., [35,14,24], and references therein). It is worth noticing that, owing to its constrained-optimization nature, the approach proposed in this paper is dual to multi-objective optimization frameworks, such as [27,5], and can be compared to the Riemannian motion policies [23,15,22].

Several works have focused on the composition and hierarchy of deep reinforcement learning policies. The seminal work [33] shows compositionality for a specific class of value functions. More general value functions are considered in [12], where, however, there are no guarantees on the policy resulting from the multi-task learning process. Boolean and weighted composition of reward, (Q-)value functions, or policies are considered in [11,19,34].
While these works have shown their effectiveness on complex systems and tasks, our proposed approach differs from them in two main aspects: (i) It separates the task learning from the task composition; (ii) It allows for (possibly time-varying and state-dependent) task prioritization, with task stacks that are enforced at runtime.

2.2 Constraint-Based Task Execution

In this paper, we adopt a constrained-optimization approach to the prioritized execution of multiple tasks learned using the RL paradigm. In [17], a constraint-based task execution framework is presented for a robotic system with control affine dynamics

\[
\dot{x} = f_0(x) + f_1(x)u, \tag{1}
\]

where x ∈ X ⊆ R^n and u ∈ U ⊆ R^m denote state and control input, respectively. The M tasks to be executed are encoded by continuously differentiable, positive definite cost functions V_i : X → R_+, i = 1, …, M. With the notation which will be adopted in this paper, the constraint-based task execution framework in [17] can be expressed as follows:

\[
\begin{aligned}
\underset{u,\delta}{\text{minimize}} \quad & \|u\|^2 + \kappa\|\delta\|^2 \\
\text{subject to} \quad & L_{f_0}V_i(x) + L_{f_1}V_i(x)\,u \le -\gamma(V_i(x)) + \delta_i, \quad i = 1,\dots,M \\
& K\delta \ge 0,
\end{aligned} \tag{2}
\]

where L_{f0}V_i(x) and L_{f1}V_i(x) are the Lie derivatives of V_i along the vector fields f_0 and f_1, respectively. The components of δ = [δ_1, …, δ_M]^T are slack variables employed to prioritize the different tasks; γ : R → R is a Lipschitz continuous extended class K function, i.e. a continuous, monotonically increasing function with γ(0) = 0; κ > 0 is an optimization parameter; and K is the prioritization matrix, known a priori, which enforces relative constraints between components of δ of the form δ_i ≤ l δ_j, with l ≪ 1, encoding the fact that task i is executed at a higher priority than task j.

In the following, Section 2.3 is devoted to showing the connection between dynamic programming and optimization-based controllers. In Section 3, this connection will allow us to execute tasks learned using the RL paradigm by means of a formulation akin to (2).

2.3 From Dynamic Programming to Constraint-Driven Control

To illustrate how controllers obtained using dynamic programming can be synthesized as the solution of an optimization program, consider a system with the following discrete-time dynamics:

\[
x_{k+1} = f(x_k, u_k). \tag{3}
\]

These dynamics can be obtained, for instance, from (1) through a discretization process. In (3), x_k denotes the state, u_k ∈ U_k(x_k) the input, and the input set U_k(x_k) may depend in general on the time k and the state x_k. The value iteration algorithm to solve a deterministic dynamic programming problem with no terminal cost can be stated as follows [2]:

\[
J_{k+1}(x_k) = \min_{u_k \in U_k(x_k)} \big\{ g_k(x_k, u_k) + J_k(f_k(x_k, u_k)) \big\}, \tag{4}
\]

with J_0(x_0) = 0, where x_0 is the initial state and g_k(x_k, u_k) is the cost incurred at time k. The total cost accumulated along the system trajectory is given by

\[
J(x_0) = \lim_{N \to \infty} \sum_{k=0}^{N-1} \alpha^k g_k(x_k, u_k). \tag{5}
\]

In this paper, we will consider α = 1 and we will assume there exists a cost-free termination state (problems of this class are referred to as shortest path problems in [2]).

By Proposition 4.2.1 in [2], the value iteration algorithm (4) converges to J⋆ satisfying

\[
J^\star(x) = \min_{u \in U(x)} \big\{ g(x, u) + J^\star(f(x, u)) \big\}. \tag{6}
\]

Adopting an approximation scheme in value space, J⋆ can be replaced by its approximation ˜J⋆, obtained by solving the following approximate dynamic programming algorithm:

\[
\tilde{J}_{k+1}(x_k) = \min_{u_k \in U_k(x_k)} \big\{ g_k(x_k, u_k) + \tilde{J}_k(f_k(x_k, u_k)) \big\}.
\]
+ +Multi-Robot Multi-Learned-Tasks +7 +In Appendix A, additional results are reported, which further illustrate the +theoretical equivalence discussed in this section, by comparing the optimal con- +troller, the optimization-based controller, and a policy learned using the RL +framework for a simple dynamical system. +3 +Prioritized Multi-Task Execution +When V = ˜J⋆, the min-norm controller solution of (13) is the optimal policy +which would be learned using a deep RL algorithm. This is what allows us to +bridge the gap between constraint-driven control and RL and it is the key to +execute tasks learned using the RL paradigm in a compositional, incremental, +prioritized, and computationally-efficient fashion. +Following the formulation given in (2), the multi-task prioritized execution of +tasks learned using RL can be implemented executing the control input solution +of the following optimization program: +minimize +u,δ +∥u∥2 + κ∥δ∥2 +subject to +1 +λi(x) +� +Lf0 ˜J⋆ +i (x) + Lf1 ˜J⋆ +i (x)u +� +≤ −σi(x) + δi, +i = 1, . . . , M +Kδ ≥ 0 +(14) +where ˜J⋆ +1 , . . . , ˜J⋆ +M are the approximated value functions encoding the tasks learned +using the RL paradigm (e.g. value iteration). In summary, with the RL paradigm, +one can get the approximate value functions ˜J⋆ +1 , . . . , ˜J⋆ +M; the robotic system is +then controlled using the control input solution of (14) in order to execute these +tasks in a prioritized fashion. +Remark 1. The Lie derivatives Lf0 ˜J⋆ +1 (x), . . . , Lf0 ˜J⋆ +M(x) contain the gradients +∂J⋆ +1 +∂x , . . . , ∂J⋆ +M +∂x . When ˜J⋆ +1 (x), . . . , ˜J⋆ +M(x) are approximated using neural networks, +these gradients can be efficiently computed using back propagation. +We conclude this section with the following Proposition 1, which ensures the +stability of the prioritized execution of multiple tasks encoded through the value +functions ˜J⋆ +i by a robotic system modeled by the dynamics (1) and controlled +with control input solution of (14). +Proposition 1 (Stability of multiple prioritized learned tasks). Con- +sider executing a set of M prioritized tasks encoded by approximate value func- +tions ˜J⋆ +i , i = 1, . . . , M, by solving the optimization problem in (14). Assume the +following: +1. All constraints in (14) are active +2. The robotic system can be modeled by driftless control affine dynamical sys- +tem, i.e. f0(x) = 0 ∀x ∈ X +3. The instantaneous cost function g used to learn the tasks is positive for all +x ∈ X and u ∈ U . + +8 +Gennaro Notomista +Then, +� +�� +˜J⋆ +1 (x(t)) +... +˜J⋆ +M(x(t)) +� +�� → N(K), +as t → ∞, where N(K) denotes the null space of the prioritization matrix K. +That is, the tasks will be executed according to the priorities specified by the +prioritization matrix K in (2). +Proof. The Lagrangian associated with the optimization problem (14) is given +by L(u, δ) = ∥u∥2 + κ∥δ∥2 + ηT +1 +� +ˆf0(x) + ˆf1(x)u + σ(x) − δ +� ++ ηT +2 (−Kδ), where +ˆf0(x) ∈ RM and ˆf1(x) ∈ RM×m defined as follows: the i-th component of ˆf0(x) is +equal to +1 +λi(x)Lf0 ˜J⋆ +i (x), while the i-th row of ˆfi(x) is equal to +1 +λi(x)Lf1 ˜J⋆ +i (x). η1 +and η2 are the Lagrange multipliers corresponding to the task and prioritization +constraints, respectively. +From the KKT conditions, we obtain: +u = −1 +2 +� ˆf1(x)T 0 +� +η +δ = 1 +2κ +� +I −KT � +η, +(15) +where η = [ηT +1 , ηT +2 ]T . 
We conclude this section with the following Proposition 1, which ensures the stability of the prioritized execution of multiple tasks encoded through the value functions $\tilde{J}^\star_i$ by a robotic system modeled by the dynamics (1) and controlled with the control input solution of (14).

Proposition 1 (Stability of multiple prioritized learned tasks). Consider executing a set of $M$ prioritized tasks encoded by approximate value functions $\tilde{J}^\star_i$, $i = 1, \ldots, M$, by solving the optimization problem in (14). Assume the following:
1. All constraints in (14) are active;
2. The robotic system can be modeled by a driftless control affine dynamical system, i.e. $f_0(x) = 0$ for all $x \in \mathcal{X}$;
3. The instantaneous cost function $g$ used to learn the tasks is positive for all $x \in \mathcal{X}$ and $u \in \mathcal{U}$.
Then,
\[
\begin{bmatrix} \tilde{J}^\star_1(x(t)) \\ \vdots \\ \tilde{J}^\star_M(x(t)) \end{bmatrix} \to \mathcal{N}(K)
\]
as $t \to \infty$, where $\mathcal{N}(K)$ denotes the null space of the prioritization matrix $K$. That is, the tasks will be executed according to the priorities specified by the prioritization matrix $K$ in (2).

Proof. The Lagrangian associated with the optimization problem (14) is given by
\[
\mathcal{L}(u, \delta) = \|u\|^2 + \kappa \|\delta\|^2 + \eta_1^T \left( \hat{f}_0(x) + \hat{f}_1(x) u + \sigma(x) - \delta \right) + \eta_2^T \left( -K \delta \right),
\]
where $\hat{f}_0(x) \in \mathbb{R}^M$ and $\hat{f}_1(x) \in \mathbb{R}^{M \times m}$ are defined as follows: the $i$-th component of $\hat{f}_0(x)$ is equal to $\frac{1}{\lambda_i(x)} L_{f_0} \tilde{J}^\star_i(x)$, while the $i$-th row of $\hat{f}_1(x)$ is equal to $\frac{1}{\lambda_i(x)} L_{f_1} \tilde{J}^\star_i(x)$; $\eta_1$ and $\eta_2$ are the Lagrange multipliers corresponding to the task and prioritization constraints, respectively.

From the KKT conditions, we obtain:
\[
u = -\frac{1}{2} \begin{bmatrix} \hat{f}_1(x)^T & 0 \end{bmatrix} \eta,
\qquad
\delta = \frac{1}{2\kappa} \begin{bmatrix} I & -K^T \end{bmatrix} \eta,
\tag{15}
\]
where $\eta = [\eta_1^T, \eta_2^T]^T$. By resorting to the Lagrange dual problem, and by using assumption 1, we get the following expression for $\eta$:
\[
\eta = 2 \underbrace{\begin{bmatrix} \frac{I}{\kappa} + \hat{f}_1(x) \hat{f}_1(x)^T & -K^T \\ K & K K^T \end{bmatrix}^{-1}}_{A_1^{-1}} \underbrace{\begin{bmatrix} \hat{f}_0(x) + \sigma(x) \\ 0 \end{bmatrix}}_{b_0},
\tag{16}
\]
where $I$ denotes an identity matrix of appropriate size. Substituting (16) into (15), we get $u = -\begin{bmatrix} \hat{f}_1(x)^T & 0 \end{bmatrix} A_1^{-1} b_0$ and $\delta = \frac{1}{\kappa} \begin{bmatrix} I & -K^T \end{bmatrix} A_1^{-1} b_0$.

To show the claimed stability property, we proceed by a Lyapunov argument. Let us consider the Lyapunov function candidate $V(x) = \frac{1}{2} \tilde{J}^\star(x)^T K^T K \tilde{J}^\star(x)$, where $\tilde{J}^\star(x) = \begin{bmatrix} \tilde{J}^\star_1(x) & \ldots & \tilde{J}^\star_M(x) \end{bmatrix}^T$. The time derivative of $V$ evaluates to:
\[
\dot{V} = \frac{\partial V}{\partial x} \dot{x} = \tilde{J}^\star(x)^T K^T K \frac{\partial \tilde{J}^\star}{\partial x} \dot{x} = \tilde{J}^\star(x)^T K^T K \underbrace{\frac{\partial \tilde{J}^\star}{\partial x} f_1(x)}_{\hat{f}_{1,\lambda}(x)} u \quad \text{(by assumption 2)},
\]
where $\hat{f}_{1,\lambda}(x) = \Lambda(x) \hat{f}_1(x) \in \mathbb{R}^{M \times m}$ and $\Lambda(x) = \operatorname{diag}([\lambda_1(x), \ldots, \lambda_M(x)])$. By assumption 2, $\lambda_i(x) \geq 0$ for all $i$, and therefore $\Lambda(x) \succeq 0$, i.e. $\Lambda(x)$ is positive semidefinite. Then,
\[
\dot{V} = \tilde{J}^\star(x)^T K^T K \Lambda(x) \hat{f}_1(x) u = -\tilde{J}^\star(x)^T K^T K \Lambda(x) \hat{A} \begin{bmatrix} \sigma(x) \\ 0 \end{bmatrix},
\]
where
\[
\hat{A} = \hat{f}_1(x) \begin{bmatrix} \hat{f}_1(x)^T & 0 \end{bmatrix} \begin{bmatrix} \frac{I}{\kappa} + \hat{f}_1(x) \hat{f}_1(x)^T & -K^T \\ K & K K^T \end{bmatrix}^{-1} \succeq 0
\]
as in Proposition 3 in [17], and we used assumption 2 to simplify the expression of $b_0$.

By assumption 3, it follows that the value functions $\tilde{J}^\star_i$ are positive definite. Therefore, from the definition of $\sigma$, in a neighborhood of $0 \in \mathbb{R}^M$, we can bound $\sigma(x)$, which is defined by the gradients of $\tilde{J}^\star$, by the value of $\tilde{J}^\star$ as $\sigma(x) = \gamma_J(\tilde{J}^\star(x))$, where $\gamma_J$ is a class $\mathcal{K}$ function.

Then, proceeding similarly to Proposition 3 in [17], we can bound $\dot{V}$ as follows:
\[
\dot{V} = -\tilde{J}^\star(x)^T K^T K \Lambda(x) \hat{A} \gamma_J(\tilde{J}^\star(x)) \leq -\tilde{J}^\star(x)^T K^T K \Lambda(x) \tilde{J}^\star(x) \leq -\bar{\lambda} V(x),
\]
where $\bar{\lambda} = \min\{\lambda_1(x), \ldots, \lambda_M(x)\}$. Hence, $K \tilde{J}^\star(x(t)) \to 0$ as $t \to \infty$, and $\tilde{J}^\star(x(t)) \to \mathcal{N}(K)$ as $t \to \infty$.

Remark 2. The proof of Proposition 1 can be carried out even in the case of a time-varying and state-dependent prioritization matrix $K$. Under the assumption that $K$ is bounded and continuously differentiable for all $x$ and uniformly in time, the norm and the gradient of $K$ can be bounded in order to obtain an upper bound for $\dot{V}$.

Remark 3. Even when the prioritization stack specified through the matrix $K$ in (14) is not physically realizable (because, for instance, the functions encoding the tasks cannot achieve the relative values prescribed by the prioritization matrix), the optimization program will still be feasible. Nevertheless, the tasks will not be executed with the desired priorities, and even the execution of high-priority tasks might be degraded.
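As a numerical companion to the proof, the closed-form expressions for $u$ and $\delta$ obtained from (15) and (16) can be transcribed as follows. This is only a sketch under assumption 1 (all task constraints active); the names are illustrative, and the block matrix $A_1$ is assumed to be invertible for the given $K$.

```python
import numpy as np

def closed_form_solution(f0_hat, f1_hat, sigma, K, kappa=10.0):
    """Closed-form u and delta from the KKT system of (14), under assumption 1.

    f0_hat : (M,) array    i-th entry: (1 / lambda_i) Lf0 J_i
    f1_hat : (M, m) array  i-th row:   (1 / lambda_i) Lf1 J_i
    sigma  : (M,) array    sigma_i(x) terms
    K      : (p, M) array  prioritization matrix
    """
    M, m = f1_hat.shape
    p = K.shape[0]
    # Block matrix A1 and vector b0 from (16)
    A1 = np.block([
        [np.eye(M) / kappa + f1_hat @ f1_hat.T, -K.T],
        [K,                                      K @ K.T],
    ])
    b0 = np.concatenate([f0_hat + sigma, np.zeros(p)])
    y = np.linalg.solve(A1, b0)                              # A1^{-1} b0
    u = -np.hstack([f1_hat.T, np.zeros((m, p))]) @ y         # u = -[f1_hat^T 0] A1^{-1} b0
    delta = (1.0 / kappa) * np.hstack([np.eye(M), -K.T]) @ y # delta = (1/kappa)[I -K^T] A1^{-1} b0
    return u, delta
```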
4 Experimental Results

In this section, the proposed framework for the execution of prioritized stacks of tasks is showcased in simulation using a team of mobile robots. Owing to the multitude of robotic units of which they are comprised, multi-robot systems are often highly redundant with respect to the tasks they have to execute. Therefore, they lend themselves perfectly to the concurrent execution of multiple prioritized tasks.

4.1 Multi-Robot Tasks

For multi-robot systems, the redundancy stems from the multiplicity of robotic units of which the system is comprised. In this section, we will showcase the execution of dependent tasks (two tasks are dependent if executing one prevents the execution of the other [1]) in different orders of priority. The multi-robot system is comprised of 6 planar robots modeled with single integrator dynamics and controlled to execute the following 4 tasks: all robots assemble a hexagonal formation (task $T_1$), robot 1 goes to goal point 1 (task $T_2$), robot 2 goes to goal point 2 (task $T_3$), robot 3 goes to goal point 3 (task $T_4$). While task $T_1$ is independent of each of the other tasks taken individually, it is not independent of any pair of tasks $T_2$, $T_3$, and $T_4$. This intuitively corresponds to the fact that it is possible to form a hexagonal formation at different points in space, but it might not be feasible to form a hexagonal formation while two robots are constrained to be in two pre-specified arbitrary locations.

[Fig. 2: twelve simulation snapshots (2a–2l), taken at t = 0, 5, 10, 15, 19, 23, 27, 30, 34, 38, 42, and 45 s, and a time plot of $\tilde{J}^\star_1$, $\tilde{J}^\star_2$, $\tilde{J}^\star_3$, and $\tilde{J}^\star_4$ (2m). Caption: Snapshots (2a–2l) and plot of $\tilde{J}^\star_1$, $\tilde{J}^\star_2$, $\tilde{J}^\star_3$, and $\tilde{J}^\star_4$ (2m) corresponding to the hexagonal formation control task and 3 go-to-goal tasks for robots 1, 2, and 3, respectively, recorded during the course of a simulated experiment with a multi-robot system. Robots are gray dots, connection edges between the robots used to assemble the desired formation are depicted in blue, goal points are shown as red dots.]

Figure 2 reports a sequence of snapshots and the plot of the value functions encoding the four tasks recorded during the course of the experiment. Denoting by $T_i \prec T_j$ the condition under which task $T_i$ has higher priority than $T_j$, the sequence of prioritized stacks tested in the experiment is the following:
\[
\begin{cases}
T_2, T_3, T_4 \prec T_1 & 0\,\mathrm{s} \leq t < 15\,\mathrm{s} \\
T_1 \prec T_2, T_3, T_4 & 15\,\mathrm{s} \leq t < 30\,\mathrm{s} \\
T_1 \prec T_2 \prec T_3, T_4 & 30\,\mathrm{s} \leq t \leq 45\,\mathrm{s}.
\end{cases}
\]
The plot of the value functions in Fig. 2m shows how, for $0\,\mathrm{s} \leq t < 15\,\mathrm{s}$, since the hexagonal formation control task has lower priority than the three go-to-goal tasks, its value function $\tilde{J}^\star_1$ is allowed to grow while the other three value functions are driven to 0 by the velocity control input solution of (14) supplied to the robots. For $15\,\mathrm{s} \leq t < 30\,\mathrm{s}$, the situation is reversed: the hexagonal formation control is executed with highest priority while the value functions encoding the three go-to-goal tasks are allowed to grow, a condition which corresponds to the non-execution of those tasks. Finally, for $30\,\mathrm{s} \leq t \leq 45\,\mathrm{s}$, task $T_2$, i.e. the go-to-goal task driving robot 1 to goal point 1, is added at higher priority with respect to tasks $T_3$ and $T_4$. Since this task is independent of task $T_1$, it can be executed at the same time. As a result, as can be seen from the snapshots, the formation translates towards the red point marked with 1. Tasks $T_1$ and $T_2$ are successfully executed, while tasks $T_3$ and $T_4$ are not executed, since they are not independent of the first two and they have lower priority.

Remark 4. The optimization program responsible for the execution of multiple prioritized tasks encoded by value functions is solved at each iteration of the robot control loop. This illustrates how the convex optimization formulation of the developed framework is computationally efficient and therefore amenable to being employed in online settings. Alternative approaches for task prioritization and allocation in the context of multi-robot systems generally result in (mixed-)integer optimization programs, which are often characterized by a combinatorial nature and are not always suitable for an online implementation [8].
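To illustrate how a prioritization stack such as $T_2, T_3, T_4 \prec T_1$ can be turned into the matrix $K$ used in (14), recall from Section 2 that $K$ encodes constraints of the form $\delta_i \leq l \delta_j$, with $l \ll 1$, whenever task $i$ has higher priority than task $j$. A possible NumPy construction is sketched below; the helper name and the value $l = 0.1$ are illustrative assumptions.

```python
import numpy as np

def priority_matrix(orderings, num_tasks, l=0.1):
    """Builds K such that K @ delta >= 0 encodes delta_i <= l * delta_j
    for every pair (i, j) in `orderings`, meaning T_i has higher priority
    than T_j (0-indexed tasks)."""
    rows = []
    for i, j in orderings:
        row = np.zeros(num_tasks)
        row[i] = -1.0   # -delta_i ...
        row[j] = l      # ... + l * delta_j >= 0  <=>  delta_i <= l * delta_j
        rows.append(row)
    return np.vstack(rows) if rows else np.zeros((0, num_tasks))

# Stack used for 0 s <= t < 15 s: T2, T3, T4 have priority over T1
K_first_stack = priority_matrix([(1, 0), (2, 0), (3, 0)], num_tasks=4)
```

Switching the stack online then amounts to swapping the matrix $K$ passed to the QP at the next control iteration.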
4.2 Discussion

The experiments of the previous section highlight several desirable properties of the framework developed in this paper for the prioritized execution of tasks encoded by value functions. First of all, its compositionality is given by the fact that tasks can easily be inserted and removed by adding and removing constraints from the optimization program (14). For the same reason, the framework is incremental and modular, as it allows for building a complex task out of a number of subtasks which can be incrementally added to the constraints of an optimization-based controller. Moreover, it allows for the seamless incorporation of priorities among tasks, and, as showcased in Section 4.1, these priorities can also be switched in an online fashion, in particular without the need to stop and restart the motion of the robots. Furthermore, Proposition 1 shows that the execution of multiple tasks using the constraint-driven control approach is stable, and the robotic system will indeed execute the given tasks according to the specified priorities. Finally, as the developed optimization program is a convex QP, its low computational complexity allows for an efficient implementation in online settings, even under real-time constraints on computationally limited robotic platforms.

5 Conclusion

In this paper, we presented an optimization-based framework for the prioritized execution of multiple tasks encoded by value functions. The approach combines control-theoretic and learning techniques in order to exhibit properties of compositionality, incrementality, stability, and low computational complexity. These properties render the proposed framework suitable for online and real-time robotic implementations. A simulated multi-robot scenario illustrated its effectiveness in the control of a redundant robotic system executing a prioritized stack of tasks.

A Comparison Between Optimal Control, Optimization-Based Control, and RL Policy

To compare the optimal controller, the optimization-based controller, and the RL policy, in this section we consider the stabilization of a double integrator system to the origin. The system dynamics are given by
\[
\dot{x} = \begin{bmatrix} 0 & 1 \\ 0 & 0 \end{bmatrix} x + \begin{bmatrix} 0 \\ 1 \end{bmatrix} u,
\]
where $x = [x_1, x_2]^T \in \mathbb{R}^2$ and $u \in \mathbb{R}$. The instantaneous cost considered in the optimal control problem (7) is given by $q(x) + u^2$, where $q(x) = x^T x$. The reward function of the value iteration algorithm employed to learn an approximate representation of the value function has been set to $g(x, u) = -q(x) - u^2$, and the resulting value function $\tilde{J}^\star$ has been shifted so that $\tilde{J}^\star(0) = 0$.

The results of the comparison are reported in Fig. 3. Here, the optimization-based controller solution of (13) with $V = \tilde{J}^\star$ is compared to the optimal controller given in (9) and to the RL policy corresponding to the approximate value function $\tilde{J}^\star$. As can be seen, the optimization-based controller and the optimal controller coincide, while the RL policy gets closer and closer to them as the number of training epochs increases.

[Fig. 3: two time plots of $x_1$ and $x_2$ comparing the optimal controller, the optimization-based controller, and RL policies trained for 125, 250, 500, and 1000 epochs. Caption: Comparison between the optimal controller (given in (9)), the RL policy (based on the approximate value function $\tilde{J}^\star$ (8)), and the optimization-based controller (solution of (13) with $V = \tilde{J}^\star$) employed to stabilize a double-integrator system to the origin of its state space, i.e. driving both $x_1$ and $x_2$ to 0. As can be seen, when trained for a sufficiently long time, the RL policy results in the optimal controller, which is also equivalent to the optimization-based controller.]

B Implementation Details

The results reported in Section 4 have been obtained using a custom value function learning algorithm written in Python.
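That learning code is not reproduced here; as a rough indication of the kind of computation involved, the following is a minimal tabular value iteration sketch in the spirit of (4), applied to the double-integrator example of Appendix A. The grid bounds, resolutions, discretization step, and number of sweeps are all illustrative assumptions.

```python
import numpy as np

# Minimal tabular value iteration for the double integrator of Appendix A.
dt = 0.1
xs = np.linspace(-2.0, 2.0, 21)     # grid shared by x1 and x2
us = np.linspace(-2.0, 2.0, 11)     # discretized control inputs
J = np.zeros((len(xs), len(xs)))    # J_0(x) = 0, cf. (4)

def interp(J, x1, x2):
    """Bilinear interpolation of J at (x1, x2), clamped to the grid."""
    i = int(np.clip(np.searchsorted(xs, x1) - 1, 0, len(xs) - 2))
    j = int(np.clip(np.searchsorted(xs, x2) - 1, 0, len(xs) - 2))
    a = float(np.clip((x1 - xs[i]) / (xs[i + 1] - xs[i]), 0.0, 1.0))
    b = float(np.clip((x2 - xs[j]) / (xs[j + 1] - xs[j]), 0.0, 1.0))
    return ((1 - a) * (1 - b) * J[i, j] + a * (1 - b) * J[i + 1, j]
            + (1 - a) * b * J[i, j + 1] + a * b * J[i + 1, j + 1])

for _ in range(100):                # value iteration sweeps
    J_new = np.empty_like(J)
    for i, x1 in enumerate(xs):
        for j, x2 in enumerate(xs):
            # stage cost q(x) + u^2 integrated over dt, plus cost-to-go at the next state
            J_new[i, j] = min(
                (x1**2 + x2**2 + u**2) * dt + interp(J, x1 + x2 * dt, x2 + u * dt)
                for u in us
            )
    J = J_new
```

The negative of the stage cost corresponds to the reward $g(x, u) = -q(x) - u^2$ used in Appendix A, so minimizing accumulated cost and maximizing accumulated reward are two views of the same computation.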
The details of each multi-robot task are given in the following.

Each robot in the team of $N$ robots is modeled using single integrator dynamics $\dot{x}_i = u_i$, where $x_i, u_i \in \mathbb{R}^2$ are the position and the velocity input of robot $i$. The ensemble state and input are denoted by $x$ and $u$, respectively. For the formation control task, the expression of the cost $g$ is given by $g(x, u) = 1000 - 0.01\left(-E(x) - 10\|u\|^2\right)$, where $E(x)$ is the formation energy defined as
\[
E(x) = \sum_{i=1}^{N} \sum_{j \in \mathcal{N}_i} \left( \|x_i - x_j\|^2 - W_{ij}^2 \right)^2,
\]
$\mathcal{N}_i$ being the neighborhood of robot $i$, i.e. the set of robots with which robot $i$ shares an edge, and
\[
W =
\begin{bmatrix}
0 & l & \sqrt{3}l & 2l & 0 & l \\
l & 0 & l & 0 & 2l & 0 \\
\sqrt{3}l & l & 0 & l & 0 & 2l \\
2l & 0 & l & 0 & l & 0 \\
0 & 2l & 0 & l & 0 & l \\
l & 0 & 2l & 0 & l & 0
\end{bmatrix}
\]
with $l = 1$. The entry $ij$ of the matrix $W$ corresponds to the desired distance to be maintained between robots $i$ and $j$.

The cost function $g$ for the go-to-goal tasks is given by $g(x, u) = 100 - 0.01\left(-\|x - \hat{x}\|^2 - \|u\|^2\right)$, where $\hat{x} \in \mathbb{R}^2$ is the desired goal point.

Remark 5 (Combination of single-robot and multi-robot tasks). Single-robot tasks (e.g. the go-to-goal tasks considered in this paper) are combined with multi-robot tasks (e.g. the formation control task) by defining the task gradient required to compute $L_{f_0} \tilde{J}^\star_i(x)$ and $L_{f_1} \tilde{J}^\star_i(x)$ in the optimization program (14) in the following way:
\[
\frac{\partial \tilde{J}^\star_i}{\partial x} = \begin{bmatrix} 0 & \cdots & 0 & \dfrac{\partial \tilde{J}^\star_{ij}}{\partial x} & 0 & \cdots & 0 \end{bmatrix},
\]
where the $j$-th entry $\tilde{J}^\star_{ij}$ is the approximate value function for task $i$ and robot $j$.
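For reference, these cost definitions translate directly into NumPy; the only assumption in the sketch below is the array layout, with the planar robot positions stored as the rows of an $(N, 2)$ array.

```python
import numpy as np

SQRT3 = np.sqrt(3.0)
l = 1.0
# Desired inter-robot distances for the hexagonal formation (0 = no edge).
W = l * np.array([
    [0, 1, SQRT3, 2, 0, 1],
    [1, 0, 1, 0, 2, 0],
    [SQRT3, 1, 0, 1, 0, 2],
    [2, 0, 1, 0, 1, 0],
    [0, 2, 0, 1, 0, 1],
    [1, 0, 2, 0, 1, 0],
])

def formation_energy(x, W):
    """E(x) = sum_i sum_{j in N_i} (||x_i - x_j||^2 - W_ij^2)^2,
    with N_i given by the nonzero entries of row i of W.
    x : (N, 2) array of planar robot positions."""
    E = 0.0
    N = x.shape[0]
    for i in range(N):
        for j in range(N):
            if i != j and W[i, j] > 0:
                E += (np.sum((x[i] - x[j])**2) - W[i, j]**2)**2
    return E

def formation_cost(x, u, W):
    """Instantaneous cost g(x, u) used to learn the formation control task."""
    return 1000.0 - 0.01 * (-formation_energy(x, W) - 10.0 * np.sum(u**2))

def go_to_goal_cost(x_i, u_i, x_goal):
    """Instantaneous cost g(x, u) used to learn a go-to-goal task for one robot."""
    return 100.0 - 0.01 * (-np.sum((x_i - x_goal)**2) - np.sum(u_i**2))
```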
References

1. Antonelli, G.: Stability analysis for prioritized closed-loop inverse kinematic algorithms for redundant robotic systems. IEEE Transactions on Robotics 25(5), 985–994 (2009). DOI 10.1109/TRO.2009.2017135
2. Bertsekas, D.P.: Reinforcement Learning and Optimal Control. Athena Scientific, Belmont, MA (2019)
3. Boyd, S., Vandenberghe, L.: Convex Optimization. Cambridge University Press (2004)
4. Bryson, A.E., Ho, Y.C.: Applied Optimal Control: Optimization, Estimation, and Control. Routledge (2018)
5. Bylard, A., Bonalli, R., Pavone, M.: Composable geometric motion policies using multi-task pullback bundle dynamical systems. arXiv preprint arXiv:2101.01297 (2021)
6. Dulac-Arnold, G., Mankowitz, D., Hester, T.: Challenges of real-world reinforcement learning. arXiv preprint arXiv:1904.12901 (2019)
7. Freeman, R.A., Primbs, J.A.: Control Lyapunov functions: New ideas from an old source. In: Proceedings of the 35th IEEE Conference on Decision and Control, vol. 4, pp. 3926–3931. IEEE (1996)
8. Gerkey, B.P., Matarić, M.J.: A formal analysis and taxonomy of task allocation in multi-robot systems. The International Journal of Robotics Research 23(9), 939–954 (2004)
9. Ghosh, D., Singh, A., Rajeswaran, A., Kumar, V., Levine, S.: Divide-and-conquer reinforcement learning. arXiv preprint arXiv:1711.09874 (2017)
10. Gupta, A., Yu, J., Zhao, T.Z., Kumar, V., Rovinsky, A., Xu, K., Devlin, T., Levine, S.: Reset-free reinforcement learning via multi-task learning: Learning dexterous manipulation behaviors without human intervention. arXiv preprint arXiv:2104.11203 (2021)
11. Haarnoja, T., Pong, V., Zhou, A., Dalal, M., Abbeel, P., Levine, S.: Composable deep reinforcement learning for robotic manipulation. In: 2018 IEEE International Conference on Robotics and Automation (ICRA), pp. 6244–6251. IEEE (2018)
12. Haarnoja, T., Tang, H., Abbeel, P., Levine, S.: Reinforcement learning with deep energy-based policies. In: International Conference on Machine Learning, pp. 1352–1361. PMLR (2017)
13. Kaelbling, L.P.: The foundation of efficient robot learning. Science 369(6506), 915–916 (2020)
14. Micchelli, C.A., Pontil, M.: Kernels for multi-task learning. In: NIPS, vol. 86, p. 89. Citeseer (2004)
15. Mukadam, M., Cheng, C.A., Fox, D., Boots, B., Ratliff, N.: Riemannian motion policy fusion through learnable Lyapunov function reshaping. In: Conference on Robot Learning, pp. 204–219. PMLR (2020)
16. Nachum, O., Gu, S., Lee, H., Levine, S.: Data-efficient hierarchical reinforcement learning. arXiv preprint arXiv:1805.08296 (2018)
17. Notomista, G., Mayya, S., Hutchinson, S., Egerstedt, M.: An optimal task allocation strategy for heterogeneous multi-robot systems. In: 2019 18th European Control Conference (ECC), pp. 2071–2076. IEEE (2019)
18. Notomista, G., Mayya, S., Selvaggio, M., Santos, M., Secchi, C.: A set-theoretic approach to multi-task execution and prioritization. In: 2020 IEEE International Conference on Robotics and Automation (ICRA), pp. 9873–9879. IEEE (2020)
19. Peng, X.B., Chang, M., Zhang, G., Abbeel, P., Levine, S.: MCP: Learning composable hierarchical control with multiplicative compositional policies. arXiv preprint arXiv:1905.09808 (2019)
20. Primbs, J.A., Nevistić, V., Doyle, J.C.: Nonlinear optimal control: A control Lyapunov function and receding horizon perspective. Asian Journal of Control 1(1), 14–24 (1999)
21. Qureshi, A.H., Johnson, J.J., Qin, Y., Henderson, T., Boots, B., Yip, M.C.: Composing task-agnostic policies with deep reinforcement learning. arXiv preprint arXiv:1905.10681 (2019)
22. Rana, M.A., Li, A., Ravichandar, H., Mukadam, M., Chernova, S., Fox, D., Boots, B., Ratliff, N.: Learning reactive motion policies in multiple task spaces from human demonstrations. In: Conference on Robot Learning, pp. 1457–1468. PMLR (2020)
23. Ratliff, N.D., Issac, J., Kappler, D., Birchfield, S., Fox, D.: Riemannian motion policies. arXiv preprint arXiv:1801.02854 (2018)
24. Ruder, S.: An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098 (2017)
25. Sahni, H., Kumar, S., Tejani, F., Isbell, C.: Learning to compose skills. arXiv preprint arXiv:1711.11289 (2017)
26. Schwartz, A., Thrun, S.: Finding structure in reinforcement learning. Advances in Neural Information Processing Systems 7, 385–392 (1995)
27. Sener, O., Koltun, V.: Multi-task learning as multi-objective optimization. arXiv preprint arXiv:1810.04650 (2018)
28. Singh, S.P.: Transfer of learning by composing solutions of elemental sequential tasks. Machine Learning 8(3), 323–339 (1992)
29. Smith, V., Chiang, C.K., Sanjabi, M., Talwalkar, A.: Federated multi-task learning. arXiv preprint arXiv:1705.10467 (2017)
30. Sontag, E.D.: A Lyapunov-like characterization of asymptotic controllability. SIAM Journal on Control and Optimization 21(3), 462–471 (1983)
31. Sontag, E.D.: A 'universal' construction of Artstein's theorem on nonlinear stabilization. Systems & Control Letters 13(2), 117–123 (1989)
32. Teh, Y.W., Bapst, V., Czarnecki, W.M., Quan, J., Kirkpatrick, J., Hadsell, R., Heess, N., Pascanu, R.: Distral: Robust multitask reinforcement learning. arXiv preprint arXiv:1707.04175 (2017)
33. Todorov, E.: Compositionality of optimal control laws. Advances in Neural Information Processing Systems 22, 1856–1864 (2009)
34. Van Niekerk, B., James, S., Earle, A., Rosman, B.: Composing value functions in reinforcement learning. In: International Conference on Machine Learning, pp. 6401–6409. PMLR (2019)
35. Zhang, Y., Yang, Q.: A survey on multi-task learning. IEEE Transactions on Knowledge and Data Engineering (2021)
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Keywords: Multi-robot motion coordination, Distributed control and planning, Learning and adaptation in teams of robots 1 Introduction Learning complex robotic tasks can be challenging for several reasons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The na- ture of compound tasks, made up of several simpler subtasks, renders it difficult to simultaneously capture and combine all features of the subtasks to be learned.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Another limiting factor of the learning process of compound tasks is the com- putational complexity of machine learning algorithms employed in the learning phase.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' This can make the training phase prohibitive, especially when the repre- sentation of the tasks comprises of a large number of parameters, as it is generally the case when dealing with complex tasks made up of several subtasks, or in the case of high-dimensional state space representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' For these reasons, when there is an effective way of combining the execution of multiple subtasks, it is useful to break down complex tasks into building blocks that can be independently learned in a more efficient fashion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Besides the reduced computational complexity stemming from the simpler nature of the subtasks to be learned, this approach has the benefit of increasing the modularity of the task execution framework, by allowing for a reuse of the subtasks as building blocks for the execution of different complex tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Discussions and analyses of such advantages can be found, for instance, in [26,9,32,16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='05346v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='RO] 13 Jan 2023 2 Gennaro Notomista Along these lines, in [13], compositionality and incrementality are recognized to be two fundamental features of robot learning algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Compositionality, in the context of learning to execute multiple tasks, is intended as the property of learning strategies to be in a form that allows them to be combined with previous knowledge.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Incrementality, guarantees the possibility of adding new knowledge and abilities over time, by, for instance, incorporating new tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Several ap- proaches have been proposed, which exhibit these two properties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Nevertheless, challenges still remain regarding tasks prioritization and stability guarantees [21,25,28,34,6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The possibility of prioritizing tasks together with the stability guarantees allows us to characterize the behavior resulting from the composition of multiple tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' In fact, when dealing with redundant robotic systems—i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' systems which possess more degrees of freedom compared to the minimum number required to execute a given task, as, for example, multi-robot systems—it is often useful to allow for the execution of multiple subtasks in a prioritized stack.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Task priorities may allow robots to adapt to the different scenarios in which they are employed by exhibiting structurally different behaviors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Therefore, it is desirable that a multi-task execution framework allows for the prioritized execution of multiple tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' In this paper, we present a constrained-optimization robot-control framework suitable for the stable execution of multiple tasks in a prioritized fashion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' This approach leverages the reinforcement learning (RL) paradigm in order to get an approximation of the value functions which will be used to encode the tasks as constraints of a convex quadratic program (QP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Owing to its convexity, the latter can be solved in polynomial time [3], and it is therefore suitable to be employed in a large variety of robotic applications, in online settings, even under real-time constraints.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The proposed framework shares the optimization- based nature with the one proposed in [18] for redundant robotic manipulators, where, however, it is assumed that a representation for all tasks to be executed is known a priori.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' As will be discussed later in the paper, this framework indeed combines compositionality and incrementality—i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' the abilities of combining and adding sub-tasks to build up compound tasks, respectively—with stable and prioritized task execution in a computationally efficient optimization-based algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Figure 1 pictorially shows the strategy adopted in this paper to allow robots to execute multiple prioritized tasks learned using the RL paradigm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Once a value function is learned using the RL paradigm (using, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=', the value itera- tion algorithm [2]), this learned value function is used to construct a control Lyapunov function [30] in such a way that a controller synthesized using a min- norm optimization program is equivalent to the optimal policy corresponding to the value function [20].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Then, multiple tasks encoded by constraints in a min-norm controller are combined in a prioritized stack as in [17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' To summarize, the contributions of this paper are the following: (i) We present a compositional and incremental framework for the execution of mul- tiple tasks encoded by value functions;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (ii) We show how priorities among tasks Multi-Robot Multi-Learned-Tasks 3 Optimal control Value functions Lyapunov functions Prioritized execution [2] [20] [17] Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Pictorial representation of the strategy adopted in this paper for the execution of prioritized stacks of learned tasks.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' can be enforced in a constrained-optimization-based formulation;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (iii) We frame the prioritized multi-task execution as a convex QP which can be efficiently solved in online settings;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (iv) We demonstrate how the proposed framework can be employed to control robot teams to execute coordinated tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' 2 Background and Related Work 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='1 Multi-Task Learning, Composition, and Execution The prioritized execution framework for learned tasks proposed in this paper can be related to approaches devised for multi-task learning—a machine learn- ing paradigm which aims at leveraging useful information contained in multiple related tasks to help improve the generalization performance of all the tasks [35].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The learning of multiple tasks can happen in parallel (independently) or in sequence for naturally sequential tasks [10,29], and a number of computational frameworks have been proposed to learn multiple tasks (see, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=', [35,14,24], and references therein).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' It is worth noticing how, owing to its constrained- optimization nature, the approach proposed in this paper is dual to multi- objective optimization frameworks, such as [27,5] or compared to the Riemannian motion policies [23,15,22].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Several works have focused on the composition and hierarchy of deep rein- forcement learning policies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The seminal work [33] shows compositionality for a specific class of value functions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' More general value functions are considered in [12], where, however, there are no guarantees on the policy resulting from the multi-task learning process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Boolean and weighted composition of reward, (Q-)value functions, or policies are considered in [11,19,34].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' While these works have shown their effectiveness on complex systems and tasks, our proposed ap- proach differs from them in two main aspects: (i) It separates the task learning from the task composition;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (ii) It allows for (possibly time-varying and state- dependent) task prioritization, with task stacks that are enforced at runtime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' 4 Gennaro Notomista 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='2 Constraint-Based Task Execution In this paper, we adopt a constrained-optimization approach to the prioritized execution of multiple tasks learned using the RL paradigm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' In [17], a constraint- based task execution framework is presented for a robotic system with control affine dynamics ˙x = f0(x) + f1(x)u, (1) where x ∈ X ⊆ Rn and u ∈ U ⊆ Rm denote state and control input, respec- tively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The M tasks to be executed are encoded by continuously differentiable, positive definite cost functions Vi : X → R+, i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' , M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' With the nota- tion which will be adopted in this paper, the constraint-based task execution framework in [17] can be expressed as follows: minimize u,δ ∥u∥2 + κ∥δ∥2 subject to Lf0Vi(x) + Lf1Vi(x)u ≤ −γ(Vi(x)) + δi i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' , M Kδ ≥ 0, (2) where Lf0Vi(x) and Lf1Vi(x) are the Lie derivatives of Vi along the vector fields f0 and f1, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The components of δ = [δ1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' , δM]T are used as slack variables employed to prioritize the different tasks;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' γ : R → R is a Lipschitz con- tinuous extended class K function—i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' a continuous, monotonically increasing function, with γ(0) = 0—κ > 0 is an optimization parameter, and K is the pri- oritization matrix, known a priori, which enforces relative constraints between components of δ of the following type: δi ≤ lδj, for l ≪ 1, which encodes the fact that task i is executed at higher priority than task j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' In the following, Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='3 will be devoted to showing the connection be- tween dynamic programming and optimization-based controllers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' In Section 3, this connection will allow us to execute tasks learned using the RL paradigm by means of a formulation akin to (2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='3 From Dynamic Programming to Constraint-Driven Control To illustrate how controllers obtained using dynamic programming can be syn- thesized as the solution of an optimization program, consider a system with the following discrete-time dynamics: xk+1 = f(xk, uk).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (3) These dynamics can be obtained, for instance, by (1), through a discretization process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' In (3), xk denotes the state, uk ∈ Uk(xk) the input, and the input set Uk(xk) may depend in general on the time k and the state xk.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The value iteration algorithm to solve a deterministic dynamic programming problem with no terminal cost can be stated as follows [2]: Jk+1(xk) = min uk∈Uk(xk) � gk(xk, uk) + Jk(fk(xk, uk)) � , (4) Multi-Robot Multi-Learned-Tasks 5 with J0(x0) = 0, where x0 is the initial state, and gk(xk, uk) is the cost incurred at time k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The total cost accumulated along the system trajectory is given by J(x0) = lim N→∞ N−1 � k=0 αkgk(xk, uk).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (5) In this paper, we will consider α = 1 and we will assume there exists a cost-free termination state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' By Proposition 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='1 in [2] the value iteration algorithm (4) converges to J⋆ satisfying J⋆(x) = min u∈U (x) � g(x, u) + J⋆(f(x, u)) � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (6) Adopting an approximation scheme in value space, J⋆ can be replaced by its approximation ˜J⋆ by solving the following approximate dynamic programming algorithm: ˜Jk+1(xk) = min uk∈Uk(xk) � gk(xk, uk) + ˜Jk(fk(xk, uk)) � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' In these settings, deep RL algorithms can be leveraged to find parametric ap- proximations, ˜J⋆, of the value function using neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' This will be the paradigm considered in this paper in order to approximate value functions en- coding the tasks to be executed in a prioritized fashion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The bridge between dynamic programming and constraint-driven control is optimal control.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' In fact, the cost in (5) is typically considered in optimal control problems, recalled, in the following, for the continuous time control affine system (1): minimize u(·) � ∞ 0 � q(x(t)) + u(t)T u(t) � dt subject to ˙x = f0(x) + f1(x)u.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (7) Comparing (7) with (5), we recognize that the instantaneous cost g(x, u) in (5) in the context of the optimal control problem (7) corresponds to q(x) + uT u, where q: X → R is a continuously differentiable and positive definite function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' A dynamic programming argument on (7) leads to the following Hamilton- Jacobi-Bellman equation: Lf0J⋆(x) − 1 4Lf1J⋆(x) (Lf1J⋆(x))T + q(x) = 0, where J∗ is the value function—similar to (6) for continuous-time problems— representing the minimum cost-to-go from state x, defined as J⋆(x) = min u(·) � ∞ t � q(x(τ)) + u(τ)T u(τ) � dτ.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (8) 1 Problems of this class are referred to as shortest path problems in [2] 6 Gennaro Notomista The optimal policy corresponding to the optimal value function (8) can be eval- uated as follows [4]: u⋆ = −1 2 (Lf1J⋆(x))T .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (9) In order to show how the optimal policy u⋆ in (9) can be obtained using an optimization-based formulation, we now recall the concept of control Lyapunov functions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Definition 1 (Control Lyapunov function [30]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' A continuously differen- tiable, positive definite function V : Rn → R is a control Lyapunov function (CLF) for the system (1) if, for all x ̸= 0 inf u � Lf0V (x) + Lf1V (x)u � < 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' (10) To select a control input u which satisfies the inequality (10), a universal expression—known as the Sontag’s formula [31]—can be employed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' With the aim of encoding the optimal control input u⋆ by means of a CLF, we will consider the following modified Sontag’s formula originally proposed in [7]: u(x) = � −v(x) (Lf1V (x))T if Lf1V (x) ̸= 0 0 otherwise, (11) where v(x) = Lf0V (x)+ � (Lf0V (x)) 2+q(x)Lf1V (x)(Lf1V (x)) T Lf1V (x)(Lf1V (x)) T .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' As shown in [20], the modified Sontag’s formula (11) is equivalent to the solution of the optimal control problem (7) if the following relation between the CLF V and the value function J⋆ holds: ∂J⋆ ∂x = λ(x)∂V ∂x , (12) where λ(x) = 2v(x) (Lf1V (x))T .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The relation in (12) corresponds to the fact that the level sets of the CLF V and those of the value function J⋆ are parallel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The last step towards the constrained-optimization-based approach to gen- erate optimal control policies is to recognize the fact that, owing to its inverse optimality property [7], the modified Sontag’s formula (11) can be obtained using the following constrained-optimization formulation, also known as the pointwise min-norm controller: minimize u ∥u∥2 subject to Lf0V (x) + Lf1V (x)u ≤ −σ(x), (13) where σ(x) = � (Lf0V (x))2 + q(x)Lf1V (x) (Lf1V (x))T .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' This formulation shares the same optimization structure with the one introduced in (2) in Section 2, and in the next section we will provide a formulation which strengthens the connection with approximate dynamic programming.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Multi-Robot Multi-Learned-Tasks 7 In Appendix A, additional results are reported, which further illustrate the theoretical equivalence discussed in this section, by comparing the optimal con- troller, the optimization-based controller, and a policy learned using the RL framework for a simple dynamical system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' 3 Prioritized Multi-Task Execution When V = ˜J⋆, the min-norm controller solution of (13) is the optimal policy which would be learned using a deep RL algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' This is what allows us to bridge the gap between constraint-driven control and RL and it is the key to execute tasks learned using the RL paradigm in a compositional, incremental, prioritized, and computationally-efficient fashion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Following the formulation given in (2), the multi-task prioritized execution of tasks learned using RL can be implemented executing the control input solution of the following optimization program: minimize u,δ ∥u∥2 + κ∥δ∥2 subject to 1 λi(x) � Lf0 ˜J⋆ i (x) + Lf1 ˜J⋆ i (x)u � ≤ −σi(x) + δi, i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' , M Kδ ≥ 0 (14) where ˜J⋆ 1 , .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' , ˜J⋆ M are the approximated value functions encoding the tasks learned using the RL paradigm (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' value iteration).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' In summary, with the RL paradigm, one can get the approximate value functions ˜J⋆ 1 , .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' , ˜J⋆ M;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' the robotic system is then controlled using the control input solution of (14) in order to execute these tasks in a prioritized fashion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Remark 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The Lie derivatives Lf0 ˜J⋆ 1 (x), .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' , Lf0 ˜J⋆ M(x) contain the gradients ∂J⋆ 1 ∂x , .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' , ∂J⋆ M ∂x .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' When ˜J⋆ 1 (x), .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' , ˜J⋆ M(x) are approximated using neural networks, these gradients can be efficiently computed using back propagation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' We conclude this section with the following Proposition 1, which ensures the stability of the prioritized execution of multiple tasks encoded through the value functions ˜J⋆ i by a robotic system modeled by the dynamics (1) and controlled with control input solution of (14).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Proposition 1 (Stability of multiple prioritized learned tasks).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Con- sider executing a set of M prioritized tasks encoded by approximate value func- tions ˜J⋆ i , i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' , M, by solving the optimization problem in (14).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Assume the following: 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' All constraints in (14) are active 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The robotic system can be modeled by driftless control affine dynamical sys- tem, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' f0(x) = 0 ∀x ∈ X 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The instantaneous cost function g used to learn the tasks is positive for all x ∈ X and u ∈ U .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' 8 Gennaro Notomista Then, � �� ˜J⋆ 1 (x(t)) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' ˜J⋆ M(x(t)) � �� → N(K), as t → ∞, where N(K) denotes the null space of the prioritization matrix K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' That is, the tasks will be executed according to the priorities specified by the prioritization matrix K in (2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' The Lagrangian associated with the optimization problem (14) is given by L(u, δ) = ∥u∥2 + κ∥δ∥2 + ηT 1 � ˆf0(x) + ˆf1(x)u + σ(x) − δ � + ηT 2 (−Kδ), where ˆf0(x) ∈ RM and ˆf1(x) ∈ RM×m defined as follows: the i-th component of ˆf0(x) is equal to 1 λi(x)Lf0 ˜J⋆ i (x), while the i-th row of ˆfi(x) is equal to 1 λi(x)Lf1 ˜J⋆ i (x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' η1 and η2 are the Lagrange multipliers corresponding to the task and prioritization constraints, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' From the KKT conditions, we obtain: u = −1 2 � ˆf1(x)T 0 � η δ = 1 2κ � I −KT � η, (15) where η = [ηT 1 , ηT 2 ]T .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' By resorting to the Lagrange dual problem, and by using assumption 1, we get the following expression for η: η = 2 � I κ + ˆf1(x) ˆf1(x)T −KT K KKT �−1 � �� � A−1 1 � ˆf0(x) + σ(x) 0 � � �� � b0 , (16) where I denotes an identity matrix of appropriate size.' 
Substituting (16) into (15), we get
\[
u = -\begin{bmatrix}\hat{f}_1(x)^T & 0\end{bmatrix} A_1^{-1} b_0 \quad\text{and}\quad \delta = \frac{1}{\kappa}\begin{bmatrix}I & -K^T\end{bmatrix} A_1^{-1} b_0.
\]
To show the claimed stability property, we proceed by a Lyapunov argument. Let us consider the Lyapunov function candidate
\[
V(x) = \frac{1}{2}\tilde{J}^\star(x)^T K^T K \tilde{J}^\star(x),
\]
where $\tilde{J}^\star(x) = \begin{bmatrix}\tilde{J}^\star_1(x) & \dots & \tilde{J}^\star_M(x)\end{bmatrix}^T$. The time derivative of $V$ evaluates to
\[
\dot{V} = \frac{\partial V}{\partial x}\dot{x} = \tilde{J}^\star(x)^T K^T K \frac{\partial \tilde{J}^\star}{\partial x}\dot{x} = \tilde{J}^\star(x)^T K^T K \underbrace{\frac{\partial \tilde{J}^\star}{\partial x} f_1(x)}_{\hat{f}_{1,\lambda}(x)} u \quad\text{(by assumption 2)},
\]
where, notice, $\hat{f}_{1,\lambda}(x) = \Lambda(x)\hat{f}_1(x) \in \mathbb{R}^{M \times m}$ and $\Lambda(x) = \mathrm{diag}([\lambda_1(x), \dots, \lambda_M(x)])$. By assumption 2, $\lambda_i(x) \ge 0$ for all $i$, and therefore $\Lambda(x) \succeq 0$, i.e. $\Lambda(x)$ is positive semidefinite. Then,
\[
\dot{V} = \tilde{J}^\star(x)^T K^T K \Lambda(x) \hat{f}_1(x) u = -\tilde{J}^\star(x)^T K^T K \Lambda(x) \hat{A} \begin{bmatrix}\sigma(x)\\0\end{bmatrix},
\]
where
\[
\hat{A} = \hat{f}_1(x)\begin{bmatrix}\hat{f}_1(x)^T & 0\end{bmatrix}\begin{bmatrix}\frac{I}{\kappa} + \hat{f}_1(x)\hat{f}_1(x)^T & -K^T \\ K & KK^T\end{bmatrix}^{-1} \succeq 0
\]
as in Proposition 3 in [17], and we used assumption 2 to simplify the expression of $b_0$. By assumption 3, it follows that the value functions $\tilde{J}^\star_i$ are positive definite. Therefore, from the definition of $\sigma$, in a neighborhood of $0 \in \mathbb{R}^M$ we can bound $\sigma(x)$—which is defined by the gradients of $\tilde{J}^\star$—by the value of $\tilde{J}^\star$ as $\sigma(x) = \gamma_J(\tilde{J}^\star(x))$, where $\gamma_J$ is a class $\mathcal{K}$ function. Then, proceeding similarly to Proposition 3 in [17], we can bound $\dot{V}$ as follows:
\[
\dot{V} = -\tilde{J}^\star(x)^T K^T K \Lambda(x) \hat{A}\, \gamma_J(\tilde{J}^\star(x)) \le -\tilde{J}^\star(x)^T K^T K \Lambda(x) \tilde{J}^\star(x) \le -\bar{\lambda} V(x),
\]
where $\bar{\lambda} = \min\{\lambda_1(x), \dots, \lambda_M(x)\}$. Hence, $K\tilde{J}^\star(x(t)) \to 0$ as $t \to \infty$, and $\tilde{J}^\star(x(t)) \to \mathcal{N}(K)$ as $t \to \infty$. $\square$
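The closed-form controller obtained by substituting (16) into (15) is straightforward to transcribe into NumPy. The snippet below is only a sketch of that transcription: the quantities $\hat{f}_0$, $\hat{f}_1$, $\sigma$, $K$, and $\kappa$ are filled with arbitrary toy values, since the full statement of (14) and the definitions of $\lambda_i$ and $\sigma$ are not reproduced in this excerpt.

```python
import numpy as np

rng = np.random.default_rng(0)
M, m, kappa = 3, 2, 10.0                     # toy sizes and slack weight (assumptions)

f1_hat = rng.standard_normal((M, m))         # i-th row stands in for (1/lambda_i) L_{f1} J_i(x)
f0_hat = rng.standard_normal(M)              # i-th entry stands in for (1/lambda_i) L_{f0} J_i(x)
sigma  = np.abs(rng.standard_normal(M))      # placeholder for the class-K term sigma(x)
K      = np.diag([3.0, 2.0, 1.0])            # example prioritization matrix

# A1 and b0 as in (16)
A1 = np.block([[np.eye(M) / kappa + f1_hat @ f1_hat.T, -K.T],
               [K,                                      K @ K.T]])
b0 = np.concatenate([f0_hat + sigma, np.zeros(M)])

# eta from (16), then u and delta from (15)
eta = 2.0 * np.linalg.solve(A1, b0)
u     = -0.5 * np.hstack([f1_hat.T, np.zeros((m, M))]) @ eta
delta = (1.0 / (2.0 * kappa)) * np.hstack([np.eye(M), -K.T]) @ eta

print(u, delta)
```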
Remark 2. The proof of Proposition 1 can be carried out even in the case of a time-varying and state-dependent prioritization matrix $K$. Under the assumption that $K$ is bounded and continuously differentiable for all $x$ and uniformly in time, the norm and the gradient of $K$ can be bounded in order to obtain an upper bound for $\dot{V}$.

Remark 3. Even when the prioritization stack specified through the matrix $K$ in (14) is not physically realizable—for instance, because the functions encoding the tasks cannot achieve the relative values prescribed by the prioritization matrix—the optimization program will still be feasible. Nevertheless, the tasks will not be executed with the desired priorities, and even the execution of high-priority tasks might be degraded.

4 Experimental Results

In this section, the proposed framework for the execution of prioritized stacks of tasks is showcased in simulation using a team of mobile robots. Owing to the multitude of robotic units of which they are comprised, multi-robot systems are often highly redundant with respect to the tasks they have to execute. Therefore, they lend themselves perfectly to the concurrent execution of multiple prioritized tasks.
4.1 Multi-Robot Tasks

For multi-robot systems, the redundancy stems from the multiplicity of robotic units of which the system is comprised. In this section, we showcase the execution of dependent tasks—two tasks are dependent if executing one prevents the execution of the other [1]—in different orders of priority. The multi-robot system is comprised of 6 planar robots modeled with single-integrator dynamics and controlled to execute the following 4 tasks: all robots assemble a hexagonal formation (task T1), robot 1 goes to goal point 1 (task T2), robot 2 goes to goal point 2 (task T3), and robot 3 goes to goal point 3 (task T4). While task T1 is independent of each of the other tasks taken singularly, it is not independent of any pair of tasks T2, T3, and T4. This intuitively corresponds to the fact that it is possible to assemble a hexagonal formation at different points in space, but it might not be feasible to do so while two robots are constrained to be at two pre-specified arbitrary locations.

[Fig. 2. Snapshots (2a–2l, at t = 0, 5, 10, 15, 19, 23, 27, 30, 34, 38, 42, and 45 s) and plot of $\tilde{J}^\star_1$, $\tilde{J}^\star_2$, $\tilde{J}^\star_3$, and $\tilde{J}^\star_4$ (2m) corresponding to the hexagonal formation control task and the three go-to-goal tasks for robots 1, 2, and 3, respectively, recorded during the course of a simulated experiment with a multi-robot system. Robots are gray dots, connection edges between the robots used to assemble the desired formation are depicted in blue, and goal points are shown as red dots.]

Figure 2 reports a sequence of snapshots and the graph of the value functions encoding the four tasks recorded during the course of the experiment.
Denoting by $T_i \prec T_j$ the condition under which task $T_i$ has higher priority than task $T_j$, the sequence of prioritized stacks tested in the experiment is the following:
\[
\begin{cases}
T_2, T_3, T_4 \prec T_1 & 0\,\mathrm{s} \le t < 15\,\mathrm{s}\\
T_1 \prec T_2, T_3, T_4 & 15\,\mathrm{s} \le t < 30\,\mathrm{s}\\
T_1 \prec T_2 \prec T_3, T_4 & 30\,\mathrm{s} \le t \le 45\,\mathrm{s}.
\end{cases}
\]
The plot of the value functions in Fig. 2m shows how, for $0\,\mathrm{s} \le t < 15\,\mathrm{s}$, since the hexagonal formation control task has lower priority than the three go-to-goal tasks, its value function $\tilde{J}^\star_1$ is allowed to grow while the other three value functions are driven to 0 by the velocity control input solution of (14) supplied to the robots. For $15\,\mathrm{s} \le t < 30\,\mathrm{s}$, the situation is reversed: the hexagonal formation control task is executed with highest priority, while the value functions encoding the three go-to-goal tasks are allowed to grow—a condition which corresponds to the non-execution of those tasks. Finally, for $30\,\mathrm{s} \le t \le 45\,\mathrm{s}$, task T2, i.e. the go-to-goal task driving robot 1 to goal point 1, is added at higher priority with respect to tasks T3 and T4. Since T2 is independent of task T1, the two can be executed at the same time. As a result, as can be seen from the snapshots, the formation translates towards the red point marked with 1. Tasks T1 and T2 are successfully executed, while tasks T3 and T4 are not, since they are not independent of the first two and have lower priority.

Remark 4. The optimization program responsible for the execution of multiple prioritized tasks encoded by value functions is solved at each iteration of the robot control loop. This illustrates how the convex optimization formulation of the developed framework is computationally efficient and therefore amenable to being employed in online settings. Alternative approaches for task prioritization and allocation in the context of multi-robot systems generally result in (mixed-)integer optimization programs, which are often characterized by a combinatorial nature and are not always suitable for an online implementation [8].
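To make the structure described in Remark 4 concrete, the sketch below solves one convex QP per control-loop iteration with CVXPY. This is not the paper's problem (14), which is not reproduced in this excerpt: the objective and the two constraint families simply mirror the terms of the Lagrangian reported in the proof of Proposition 1, and the constraint directions, the toy data, and the prioritization matrix are all assumptions.

```python
import numpy as np
import cvxpy as cp

def prioritized_control_step(f0_hat, f1_hat, sigma, K, kappa=10.0):
    """Solve one prioritized-task QP (schematic stand-in for (14)) and return the input u."""
    M, m = f1_hat.shape
    u = cp.Variable(m)
    delta = cp.Variable(M)                      # slack variables, one per task
    objective = cp.Minimize(cp.sum_squares(u) + kappa * cp.sum_squares(delta))
    constraints = [
        f0_hat + f1_hat @ u + sigma <= delta,   # task constraints (direction assumed)
        -K @ delta <= 0,                        # prioritization constraint, mirroring the -K*delta Lagrangian term
    ]
    cp.Problem(objective, constraints).solve()
    return u.value

# One illustrative step with toy data: M = 4 tasks, 6 planar robots (m = 12 velocity inputs).
rng = np.random.default_rng(1)
u_t = prioritized_control_step(rng.standard_normal(4),
                               rng.standard_normal((4, 12)),
                               np.abs(rng.standard_normal(4)),
                               np.eye(4))
# In the experiment, this solve is repeated at every control-loop iteration, and the
# prioritization matrix K is switched online at t = 15 s and t = 30 s to realize the
# three prioritization stacks listed above.
print(u_t.shape)
```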
4.2 Discussion

The experiments of the previous section highlight several desirable properties of the framework developed in this paper for the prioritized execution of tasks encoded by value functions. First of all, its compositionality stems from the fact that tasks can easily be inserted and removed by adding and removing constraints from the optimization program (14). For the same reason, the framework is incremental and modular, as it allows a complex task to be built from a number of subtasks which can be incrementally added to the constraints of an optimization-based controller. Moreover, it allows for the seamless incorporation of priorities among tasks, and, as we showcased in Section 4.1, these priorities can also be switched in an online fashion, in particular without the need to stop and restart the motion of the robots. Furthermore, Proposition 1 shows that the execution of multiple tasks using the constraint-driven control is stable, and the robotic system will indeed execute the given tasks according to the specified priorities. Finally, as the developed optimization program is a convex QP, its low computational complexity allows for an efficient implementation in online settings, even under real-time constraints on computationally limited robotic platforms.
5 Conclusion

In this paper, we presented an optimization-based framework for the prioritized execution of multiple tasks encoded by value functions. The approach combines control-theoretic and learning techniques in order to exhibit properties of compositionality, incrementality, stability, and low computational complexity. These properties render the proposed framework suitable for online and real-time robotic implementations. A simulated multi-robot scenario illustrated its effectiveness in the control of a redundant robotic system executing a prioritized stack of tasks.

A Comparison Between Optimal Control, Optimization-Based Control, and RL Policy

To compare the optimal controller, the optimization-based controller, and the RL policy, in this section we consider the stabilization of a double-integrator system to the origin. The system dynamics are given by
\[
\dot{x} = \begin{bmatrix}0 & 1\\ 0 & 0\end{bmatrix} x + \begin{bmatrix}0\\1\end{bmatrix} u,
\]
where $x = [x_1, x_2]^T \in \mathbb{R}^2$ and $u \in \mathbb{R}$. The instantaneous cost considered in the optimal control problem (7) is given by $q(x) + u^2$, where $q(x) = x^T x$. The reward function of the value iteration algorithm employed to learn an approximate representation of the value function has been set to $g(x, u) = -q(x) - u^2$, and the resulting value function $\tilde{J}^\star$ has been shifted so that $\tilde{J}^\star(0) = 0$. The results of the comparison are reported in Fig. 3. Here, the optimization-based controller, solution of (13) with $V = \tilde{J}^\star$, is compared to the optimal controller given in (9) and to the RL policy corresponding to the approximate value function $\tilde{J}^\star$. As can be seen, the optimization-based controller and the optimal controller coincide, while the RL policy becomes closer and closer to them as the number of training epochs increases.

[Fig. 3. Comparison between the optimal controller (given in (9)), the RL policy (based on the approximate value function $\tilde{J}^\star$ (8), trained for 125, 250, 500, and 1000 epochs), and the optimization-based controller (solution of (13) with $V = \tilde{J}^\star$) employed to stabilize a double-integrator system to the origin of its state space, i.e. driving both $x_1$ and $x_2$ to 0. When trained for a sufficiently long time, the RL policy recovers the optimal controller, which is also equivalent to the optimization-based controller.]
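For reference, the linear-quadratic structure of this benchmark means that the infinite-horizon optimal feedback can be computed directly from the continuous-time algebraic Riccati equation. The SciPy sketch below does exactly that and simulates the closed loop; it is only a stand-in for the baseline used in the comparison, since equations (7), (9), and (13) are not reproduced in this excerpt, and the initial condition is arbitrary.

```python
import numpy as np
from scipy.linalg import solve_continuous_are
from scipy.integrate import solve_ivp

# Double-integrator dynamics and the quadratic cost x^T x + u^2
A = np.array([[0.0, 1.0], [0.0, 0.0]])
B = np.array([[0.0], [1.0]])
Q = np.eye(2)
R = np.array([[1.0]])

# Infinite-horizon optimal (LQR) feedback u = -R^{-1} B^T P x
P = solve_continuous_are(A, B, Q, R)
Kgain = np.linalg.solve(R, B.T @ P)

def closed_loop(t, x):
    u = -Kgain @ x          # control input (length-1 array)
    return A @ x + B @ u

# Simulate from an arbitrary initial condition over 20 s
sol = solve_ivp(closed_loop, (0.0, 20.0), [5.0, 0.0], max_step=0.01)
print(sol.y[:, -1])          # both x1 and x2 are driven to (approximately) zero
```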
B Implementation Details

The results reported in Section 4 have been obtained using a custom value-function learning algorithm written in Python. The details of each multi-robot task are given in the following.

Each robot in the team of $N$ robots is modeled using single-integrator dynamics $\dot{x}_i = u_i$, where $x_i, u_i \in \mathbb{R}^2$ are the position and the velocity input of robot $i$. The ensemble state and input are denoted by $x$ and $u$, respectively. For the formation control task, the expression of the cost $g$ is given by
\[
g(x, u) = 1000 - 0.01\big(-E(x) - 10\|u\|^2\big),
\]
where $E(x)$ is the formation energy defined as
\[
E(x) = \sum_{i=1}^N \sum_{j \in \mathcal{N}_i}\big(\|x_i - x_j\|^2 - W_{ij}^2\big)^2,
\]
$\mathcal{N}_i$ being the neighborhood of robot $i$, i.e. the set of robots with which robot $i$ shares an edge, and
\[
W = \begin{bmatrix}
0 & l & \sqrt{3}l & 2l & 0 & l\\
l & 0 & l & 0 & 2l & 0\\
\sqrt{3}l & l & 0 & l & 0 & 2l\\
2l & 0 & l & 0 & l & 0\\
0 & 2l & 0 & l & 0 & l\\
l & 0 & 2l & 0 & l & 0
\end{bmatrix}
\]
with $l = 1$. The entry $W_{ij}$ of the matrix $W$ corresponds to the desired distance to be maintained between robots $i$ and $j$.
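A direct NumPy transcription of the formation energy and cost above is given below. The only assumption beyond what is stated is that the neighborhood $\mathcal{N}_i$ is taken to be the set of robots $j$ with $W_{ij} > 0$; the example positions form an exact unit hexagon, for which the energy is (numerically) zero.

```python
import numpy as np

l = 1.0
W = l * np.array([
    [0, 1, np.sqrt(3), 2, 0, 1],
    [1, 0, 1, 0, 2, 0],
    [np.sqrt(3), 1, 0, 1, 0, 2],
    [2, 0, 1, 0, 1, 0],
    [0, 2, 0, 1, 0, 1],
    [1, 0, 2, 0, 1, 0],
])

def formation_energy(x, W):
    """E(x) = sum_i sum_{j in N_i} (||x_i - x_j||^2 - W_ij^2)^2, with N_i = {j : W_ij > 0} (assumed)."""
    E = 0.0
    for i in range(x.shape[0]):
        for j in range(x.shape[0]):
            if W[i, j] > 0:                              # j is a neighbor of i
                E += (np.sum((x[i] - x[j]) ** 2) - W[i, j] ** 2) ** 2
    return E

def formation_cost(x, u, W):
    """Instantaneous cost g(x, u) = 1000 - 0.01 * (-E(x) - 10 ||u||^2) of the formation task."""
    return 1000.0 - 0.01 * (-formation_energy(x, W) - 10.0 * np.sum(u ** 2))

# Robots placed exactly on a unit hexagon: the formation energy vanishes.
angles = np.pi / 3.0 * np.arange(6)
x_hex = np.stack([np.cos(angles), np.sin(angles)], axis=1)   # shape (6, 2)
print(formation_energy(x_hex, W), formation_cost(x_hex, np.zeros((6, 2)), W))
```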
The cost function $g$ for the go-to-goal tasks is given by
\[
g(x, u) = 100 - 0.01\big(-\|x - \hat{x}\|^2 - \|u\|^2\big),
\]
where $\hat{x} \in \mathbb{R}^2$ is the desired goal point.

Remark 5 (Combination of single-robot and multi-robot tasks). Single-robot tasks (e.g. the go-to-goal tasks considered in this paper) are combined with multi-robot tasks (e.g. the formation control task) by defining the task gradient required to compute $L_{f_0}\tilde{J}^\star_i(x)$ and $L_{f_1}\tilde{J}^\star_i(x)$ in the optimization program (14) in the following way:
\[
\frac{\partial \tilde{J}^\star_i}{\partial x} = \begin{bmatrix} 0 & \cdots & 0 & \frac{\partial \tilde{J}^\star_{ij}}{\partial x} & 0 & \cdots & 0\end{bmatrix},
\]
where the $j$-th entry $\tilde{J}^\star_{ij}$ is the approximate value function for task $i$ and robot $j$.

References

1. Antonelli, G.: Stability analysis for prioritized closed-loop inverse kinematic algorithms for redundant robotic systems. IEEE Transactions on Robotics 25(5), 985–994 (2009). DOI 10.1109/TRO.2009.2017135
2. Bertsekas, D.P.: Reinforcement learning and optimal control. Athena Scientific, Belmont, MA (2019)
3. Boyd, S., Vandenberghe, L.: Convex optimization. Cambridge University Press (2004)
4. Bryson, A.E., Ho, Y.C.: Applied optimal control: optimization, estimation, and control. Routledge (2018)
5. Bylard, A., Bonalli, R., Pavone, M.: Composable geometric motion policies using multi-task pullback bundle dynamical systems. arXiv preprint arXiv:2101.01297 (2021)
6. Dulac-Arnold, G., Mankowitz, D., Hester, T.: Challenges of real-world reinforcement learning. arXiv preprint arXiv:1904.12901 (2019)
7. Freeman, R.A., Primbs, J.A.: Control Lyapunov functions: New ideas from an old source. In: Proceedings of 35th IEEE Conference on Decision and Control, vol. 4, pp. 3926–3931. IEEE (1996)
8. Gerkey, B.P., Matarić, M.J.: A formal analysis and taxonomy of task allocation in multi-robot systems. The International Journal of Robotics Research 23(9), 939–954 (2004)
9. Ghosh, D., Singh, A., Rajeswaran, A., Kumar, V., Levine, S.: Divide-and-conquer reinforcement learning. arXiv preprint arXiv:1711.09874 (2017)
10. Gupta, A., Yu, J., Zhao, T.Z., Kumar, V., Rovinsky, A., Xu, K., Devlin, T., Levine, S.: Reset-free reinforcement learning via multi-task learning: Learning dexterous manipulation behaviors without human intervention. arXiv preprint arXiv:2104.11203 (2021)
11. Haarnoja, T., Pong, V., Zhou, A., Dalal, M., Abbeel, P., Levine, S.: Composable deep reinforcement learning for robotic manipulation. In: 2018 IEEE International Conference on Robotics and Automation (ICRA), pp. 6244–6251. IEEE (2018)
12. Haarnoja, T., Tang, H., Abbeel, P., Levine, S.: Reinforcement learning with deep energy-based policies. In: International Conference on Machine Learning, pp. 1352–1361. PMLR (2017)
13. Kaelbling, L.P.: The foundation of efficient robot learning. Science 369(6506), 915–916 (2020)
14. Micchelli, C.A., Pontil, M.: Kernels for multi-task learning. In: NIPS, vol. 86, p. 89. Citeseer (2004)
15. Mukadam, M., Cheng, C.A., Fox, D., Boots, B., Ratliff, N.: Riemannian motion policy fusion through learnable Lyapunov function reshaping. In: Conference on Robot Learning, pp. 204–219. PMLR (2020)
16. Nachum, O., Gu, S., Lee, H., Levine, S.: Data-efficient hierarchical reinforcement learning. arXiv preprint arXiv:1805.08296 (2018)
17. Notomista, G., Mayya, S., Hutchinson, S., Egerstedt, M.: An optimal task allocation strategy for heterogeneous multi-robot systems. In: 2019 18th European Control Conference (ECC), pp. 2071–2076. IEEE (2019)
18. Notomista, G., Mayya, S., Selvaggio, M., Santos, M., Secchi, C.: A set-theoretic approach to multi-task execution and prioritization. In: 2020 IEEE International Conference on Robotics and Automation (ICRA), pp. 9873–9879. IEEE (2020)
19. Peng, X.B., Chang, M., Zhang, G., Abbeel, P., Levine, S.: MCP: Learning composable hierarchical control with multiplicative compositional policies. arXiv preprint arXiv:1905.09808 (2019)
20. Primbs, J.A., Nevistić, V., Doyle, J.C.: Nonlinear optimal control: A control Lyapunov function and receding horizon perspective. Asian Journal of Control 1(1), 14–24 (1999)
21. Qureshi, A.H., Johnson, J.J., Qin, Y., Henderson, T., Boots, B., Yip, M.C.: Composing task-agnostic policies with deep reinforcement learning. arXiv preprint arXiv:1905.10681 (2019)
22. Rana, M.A., Li, A., Ravichandar, H., Mukadam, M., Chernova, S., Fox, D., Boots, B., Ratliff, N.: Learning reactive motion policies in multiple task spaces from human demonstrations. In: Conference on Robot Learning, pp. 1457–1468. PMLR (2020)
23. Ratliff, N.D., Issac, J., Kappler, D., Birchfield, S., Fox, D.: Riemannian motion policies. arXiv preprint arXiv:1801.02854 (2018)
24. Ruder, S.: An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098 (2017)
25. Sahni, H., Kumar, S., Tejani, F., Isbell, C.: Learning to compose skills. arXiv preprint arXiv:1711.11289 (2017)
26. Schwartz, A., Thrun, S.: Finding structure in reinforcement learning. Advances in Neural Information Processing Systems 7, 385–392 (1995)
27. Sener, O., Koltun, V.: Multi-task learning as multi-objective optimization. arXiv preprint arXiv:1810.04650 (2018)
28. Singh, S.P.: Transfer of learning by composing solutions of elemental sequential tasks. Machine Learning 8(3), 323–339 (1992)
29. Smith, V., Chiang, C.K., Sanjabi, M., Talwalkar, A.: Federated multi-task learning. arXiv preprint arXiv:1705.10467 (2017)
30. Sontag, E.D.: A Lyapunov-like characterization of asymptotic controllability. SIAM Journal on Control and Optimization 21(3), 462–471 (1983)
31. Sontag, E.D.: A 'universal' construction of Artstein's theorem on nonlinear stabilization. Systems & Control Letters 13(2), 117–123 (1989)
32. Teh, Y.W., Bapst, V., Czarnecki, W.M., Quan, J., Kirkpatrick, J., Hadsell, R., Heess, N., Pascanu, R.: Distral: Robust multitask reinforcement learning. arXiv preprint arXiv:1707.04175 (2017)
33. Todorov, E.: Compositionality of optimal control laws. Advances in Neural Information Processing Systems 22, 1856–1864 (2009)
34. Van Niekerk, B., James, S., Earle, A., Rosman, B.: Composing value functions in reinforcement learning. In: International Conference on Machine Learning, pp. 6401–6409. PMLR (2019)
35. Zhang, Y., Yang, Q.: A survey on multi-task learning.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} +page_content=' IEEE Transactions on Knowledge and Data Engineering (2021)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LdE4T4oBgHgl3EQf8Q5Z/content/2301.05346v1.pdf'} diff --git a/LtE0T4oBgHgl3EQfiwHh/content/2301.02451v1.pdf b/LtE0T4oBgHgl3EQfiwHh/content/2301.02451v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..74021da814526974f124d6a7e28fe72e3fe5b76d --- /dev/null +++ b/LtE0T4oBgHgl3EQfiwHh/content/2301.02451v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a64a51d6081b4778408658d6dd9c8bef191d6f49c32c839b59e466c42a27b6ec +size 4707472 diff --git a/MtE4T4oBgHgl3EQf8w72/content/2301.05351v1.pdf b/MtE4T4oBgHgl3EQf8w72/content/2301.05351v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..96ac68d28ec1401e9b6492ccc49a609149517f0c --- /dev/null +++ b/MtE4T4oBgHgl3EQf8w72/content/2301.05351v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd1d98f1cd1dcd5fef1620955f3e1fd7699fac3242219e79167ae31f2c70f53f +size 5062764 diff --git a/MtE4T4oBgHgl3EQf8w72/vector_store/index.faiss b/MtE4T4oBgHgl3EQf8w72/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..30e776bfa66e730b3fb7cc6675ae911fb703ac21 --- /dev/null +++ b/MtE4T4oBgHgl3EQf8w72/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c41f41631c47c58167b544d48b5c7c8fe3bc2e1ccb8046b54162b195935adef2 +size 3276845 diff --git a/OtE0T4oBgHgl3EQf0wJ3/content/2301.02690v1.pdf b/OtE0T4oBgHgl3EQf0wJ3/content/2301.02690v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3cd273b8f26711c3af4ff52f79a2fc78fd457be0 --- /dev/null +++ b/OtE0T4oBgHgl3EQf0wJ3/content/2301.02690v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7711ec059d9989bf416e123046ca396c13c4eebe4c6eaf9a8978e45bed765d97 +size 1135421 diff --git a/OtE0T4oBgHgl3EQf0wJ3/vector_store/index.faiss b/OtE0T4oBgHgl3EQf0wJ3/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..da31624f936db125153915ae8df6b9039344b2ac --- /dev/null +++ b/OtE0T4oBgHgl3EQf0wJ3/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da83b71f2edba64563a05fea14f65a229b91dea2e6f57b0396a038a2fd820e71 +size 5111853 diff --git a/OtE4T4oBgHgl3EQfkA1f/vector_store/index.faiss b/OtE4T4oBgHgl3EQfkA1f/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..fd8d75ba1e412f47b3a683e695b9e44025bb3a5d --- /dev/null +++ b/OtE4T4oBgHgl3EQfkA1f/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1dfc2d8402d3a2862b1f2df408ad3732e9cba6a8b2d6f9bb22b1ee50e99b6e91 +size 14024749 diff --git a/P9A0T4oBgHgl3EQfDP-k/vector_store/index.pkl b/P9A0T4oBgHgl3EQfDP-k/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..1a74c0eabd343fb792249688ac59af8c94f304ef --- /dev/null +++ b/P9A0T4oBgHgl3EQfDP-k/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:008fdab1e24585564db8787880e506e92474bc2d1de3b69130df5af5dd456ce6 +size 281680 diff --git a/PdE0T4oBgHgl3EQf1AJg/vector_store/index.pkl b/PdE0T4oBgHgl3EQf1AJg/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..84d8d12d434dc60f973bbea2fb8a060af928d71a 
--- /dev/null +++ b/PdE0T4oBgHgl3EQf1AJg/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffcace5464b87e7e768adbfe13b69dda62ea09215c9031bb5272062681bb3b4f +size 725239 diff --git a/Q9FQT4oBgHgl3EQfaDZD/content/tmp_files/2301.13318v1.pdf.txt b/Q9FQT4oBgHgl3EQfaDZD/content/tmp_files/2301.13318v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..37d9c0f25998d2f3f32a466c517808f6e6b02de0 --- /dev/null +++ b/Q9FQT4oBgHgl3EQfaDZD/content/tmp_files/2301.13318v1.pdf.txt @@ -0,0 +1,1592 @@ +Proxy-based Zero-Shot Entity Linking by Effective Candidate Retrieval +Maciej Wiatrak1∗, Eirini Arvaniti1, Angus Brayne1, Jonas Vetterle1, 2, Aaron Sim1 +1BenevolentAI 2Moonfire Ventures +London, United Kingdom +{maciej.wiatrak, eirini.arvaniti, angus.brayne, aaron.sim}@benevolent.ai +jonas@moonfire.com +Abstract +A recent advancement in the domain of +biomedical Entity Linking is the development +of powerful two-stage algorithms – an initial +candidate retrieval stage that generates a short- +list of entities for each mention, followed by +a candidate ranking stage. However, the ef- +fectiveness of both stages are inextricably de- +pendent on computationally expensive compo- +nents. Specifically, in candidate retrieval via +dense representation retrieval it is important +to have hard negative samples, which require +repeated forward passes and nearest neigh- +bour searches across the entire entity label set +throughout training. +In this work, we show +that pairing a proxy-based metric learning loss +with an adversarial regularizer provides an ef- +ficient alternative to hard negative sampling +in the candidate retrieval stage. In particular, +we show competitive performance on the re- +call@1 metric, thereby providing the option to +leave out the expensive candidate ranking step. +Finally, we demonstrate how the model can be +used in a zero-shot setting to discover out of +knowledge base biomedical entities. +1 +Introduction +The defining challenge in biomedical Entity Link- +ing (EL) is performing classification over a large +number of entity labels with limited availability +of labelled mention data, in a constantly evolv- +ing knowledge base. For instance, while the Uni- +fied Medical Language System (UMLS) knowl- +edge base (Bodenreider, 2004) contains millions +of unique entity labels, the EL training data in the +biomedical domain as a whole is notoriously scarce, +particularly when compared to the general domain +– Wikipedia, for instance, is powerful as both a +Knowledge base and a source of matching entities +and mentions. Furthermore, biomedical knowledge +bases are evolving rapidly with new entities be- +ing added constantly. Given this knowledge base +∗ Corresponding author. +evolution and scarcity of training data it is crucial +that biomedical entity linking systems can scale +efficiently to large entity sets, and can discover or +discern entities outside of the knowledge base and +training data. +Recent methods in the general entity linking do- +main (Logeswaran et al., 2019; Wu et al., 2020) +address the data issue with zero-shot entity linking +systems that use entity descriptions to form en- +tity representations and generalise to entities with- +out mentions. A particularly powerful architecture +was initially proposed by Humeau et al. (2019) +and further improved by Wu et al. (2020). 
It con- +sists of a two-stage approach: 1) candidate retrieval +in a dense space performed by a bi-encoder (Wu +et al., 2020) which independently embeds the entity +mention and its description, and 2) candidate rank- +ing performed by a cross-encoder which attends +across both the mention and entity description (Lo- +geswaran et al., 2019). In this work we focus on +the former, which is traditionally optimised with +the cross-entropy (CE) loss and aims to maximise +the similarity between the entity mention and its +description relative to the similarities of incorrect +mention-description pairs. In practice, the large +number of knowledge base entities necessitates the +use of negative sampling to avoid the computa- +tional burden of comparing each mention to all +of the entity descriptions. However, if the sam- +pled distribution of negatives is not reflective of the +model distribution, the performance may be poor. +Recently, Zhang and Stratos (2021) showed that +using hard negatives - the highest scoring incorrect +examples - results in bias reduction through better +approximation of the model distribution. Collect- +ing hard negatives is computationally expensive, as +it requires periodically performing inference and +retrieving approximate nearest neighbours for each +mention. +At the ranking stage, negative sampling is not +required, as the number of candidates usually does +arXiv:2301.13318v1 [cs.LG] 30 Jan 2023 + +not exceed 64. However, the state-of-the-art cross- +encoder model used for ranking is very expen- +sive to run, scaling quadratically with the input +sequence length. This highlights the need for ef- +ficient and performant candidate retrieval models +capable of disambiguating mentions without the +need for the expensive ranking step. +In this paper, we propose and evaluate a novel +loss for the candidate retrieval model, which breaks +the dependency between the positive and nega- +tive pairs. Our contributions are: (1) a novel loss +which significantly outperforms the benchmark +cross-entropy loss on the candidate retrieval task +when using random negatives, and performs com- +petitively when using hard negatives. (2) We design +and apply an adversarial regularization method, +based on the Fast Gradient Sign Method (Good- +fellow et al., 2015), which is designed to simulate +hard negative samples without expensively mining +them. (3) We construct a biomedical dataset for out +of knowledge base detection evaluation using the +MedMentions corpus and show that our model can +robustly identify mentions that lack a correspond- +ing entry in the knowledge base, while maintaining +high performance on the retrieval task. +Our main testing ground is the biomedical en- +tity linking dataset MedMentions (Mohan and Li, +2019), which utilizes UMLS as its knowledge base. +Additionally, to confirm that our method works also +in the general, non-biomedical domain, we evalu- +ate it on the Zero-Shot Entity Linking (ZESHEL) +dataset proposed in Logeswaran et al. (2019). We +focus on the retrieval task with the recall@1 metric, +because we are aiming to predict the entity directly +without requiring the additional expensive ranking +stage. Our results show that both the proposed loss +and regularization improve performance, achieving +state-of-the-art results on recall@1 and competitive +performance on recall@64 on both datasets. 
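As a concrete illustration of the retrieval stage discussed above, the following minimal sketch (not the authors' implementation; PyTorch is assumed and all names are illustrative) scores a single mention embedding against a cached matrix of entity embeddings produced by the entity encoder and returns a shortlist of candidates:

import torch

def retrieve_candidates(mention_vec: torch.Tensor,
                        entity_matrix: torch.Tensor,
                        k: int = 64):
    # mention_vec:   (d,) embedding from the mention encoder
    # entity_matrix: (num_entities, d) embeddings from the entity encoder,
    #                pre-computed once over the whole knowledge base
    scores = entity_matrix @ mention_vec          # dot-product similarity
    top_scores, top_ids = torch.topk(scores, k)   # shortlist passed to the ranker
    return top_ids, top_scores

In practice the entity matrix is usually placed in an approximate nearest neighbour index rather than held as a dense tensor, which is exactly where the cost of refreshing hard negatives during training comes from.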
Fi- +nally, we demonstrate that our model can robustly +identify biomedical out of knowledge base enti- +ties, without requiring any changes to the training +procedure. +2 +Related Work +Zero-Shot Entity Linking +There is a plethora +of work on zero-shot entity linking methods lever- +aging the bi-encoder architecture (Wu et al., 2020) +for candidate retrieval. These include novel scoring +functions between the input and the label (Humeau +et al., 2019; Luan et al., 2021; Khattab and Zaharia, +2020), cross-domain pretraining methods (Varma +et al., 2021), training and inference optimisation +techniques (Bhowmik et al., 2021) and effective en- +tity representation methods (Ma et al., 2021). Our +work instead focuses on optimising the candidate +retriever’s loss function. +The impact of hard negatives on the entity link- +ing model performance has also been investigated +(Gillick et al., 2019; Zhang and Stratos, 2021). No- +tably, Zhang and Stratos (2021) develop analytical +tools to explain the role of hard negatives and evalu- +ate their model on the zero-shot entity linking task. +We draw on this work, but move away from the CE +loss towards a novel contrastive proxy-based loss. +Finally, there is a body of work on zero-shot en- +tity linking in the biomedical domain using cluster- +ing (Angell et al., 2021; Agarwal et al., 2021). Our +method does not consider the affinities between +mentions directly and links them independently. +Therefore, we do not study entity discovery. +An important aspect of biomedical entity linking +systems is the detection of “unlinkable” mentions +that lack a corresponding entry in the Knowledge +Base - referred to as NIL detection. Methods for +this task can be grouped into four main strategies +(Shen et al., 2014; Sevgili et al., 2020): (1) label +a mention as NIL when the corresponding candi- +date retriever does not return any candidate entities +(Tsai and Roth, 2016), (2) assign the NIL label to +mentions whose corresponding top-ranked entity +does not exceed some score threshold (Bunescu and +Pasca, 2006; Gottipati and Jiang, 2011; Lazic et al., +2015), (3) train a classifier that predicts whether +the top-ranked entity for a given mention is correct +(Moreno et al., 2017), (4) explicitly introduce a +NIL class to the candidate ranking model (Kolitsas +et al., 2018). A downside of the final approach is +that knowledge of the NIL mention distribution is +required at training time. In this work we tune a +NIL score threshold (2) on a validation set. Detect- +ing unlinkable mentions is particularly important +in the biomedical domain, where the knowledge +bases are rapidly evolving. +Proxy-based Losses +State-of-the-art entity link- +ing models such as BLINK (Wu et al., 2020) lever- +age metric learning loss during training to make +mentions similar to its assigned entity representa- +tions. Metric learning losses could be divided into +two categories, pair-based and proxy-based losses +(Kim et al., 2020). Pair-based losses can lever- +age semantic relations between data points, here + +mentions. However, training them can be highly +computationally expensive. On the other hand, +proxy-based losses are significantly less compu- +tationally complex. This is done by establishing a +proxy for each class and trying to increase the sim- +ilarity between data points and its assigned prox- +ies. Therefore, avoiding comparing the mentions +to each other in favour of comparing the mentions +to their proxies. 
We draw heavily on proxy-based +losses (Movshovitz-Attias et al., 2017; Kim et al., +2020) from metric learning by treating entity de- +scriptions as the proxies. We establish a proxy +for each entity, creating mention-proxy (i.e. en- +tity) pairs, and optimise the model to embed the +mention close to its assigned proxy. The loss pro- +posed here is similar to the Proxy-NCA loss of +Movshovitz-Attias et al. (2017). Our modification +is the use of the Softplus function, similar to Kim +et al. (2020), to avoid a vanishing gradient for the +true mention-proxy pair. +Adversarial Regularization +Entity linking sys- +tems often rely on careful mining of hard nega- +tive examples to boost their performance (Gillick +et al., 2019; Zhang and Stratos, 2021) at the ex- +pense of increased computational complexity. The +model needs update hard negatives for each men- +tion periodically. A potential alternative to hard +negative mining is training on adversarial exam- +ples (Szegedy et al., 2013; Goodfellow et al., 2015) +- synthetic data points designed to induce the model +to making incorrect predictions, such that they are +more challenging. Adversarial training can be seen +as data augmentation and can help reduce overfit- +ting. Goodfellow et al. (2015) introduced a simple +method for generating adversarial examples, called +Fast Gradient Sign Method (FGSM), which we +build upon in this work. FGSM creates adversarial +examples by applying small perturbations to the +original inputs - often the word embeddings for +NLP problems. FGSM has been used successfully +as a regulariser in supervised and semi-supervised +NLP tasks (Miyato et al., 2016; Pan et al., 2021). +Here, we follow a similar approach and use FGSM +to augment our training pairs with adversarial posi- +tive and negative examples. +3 +Task formulation +In the Entity Linking task we are provided with +a list of documents D ∈ D, where each document +has a set of mentions MD = {m1, m2, . . . , mND}. +The task is to link each mention mi to an entity +ei, where each entity belongs to the Knowledge +Base (KB) E. In this work we focus specifically +on the problem of biomedical zero-shot entity link- +ing. The setup for the zero-shot task is the same as +for entity linking introduced above, except that the +set of entities present in the test set is not present +in the training set, i.e. Etrain ∩ Etest = ∅ with +Etrain ∪ Etest = E. We focus specifically on the +Candidate Retrieval task, where the goal is given +a mention mi, reduce the pool of potential candi- +date entities from a KB to a smaller subset. Candi- +date retrieval is crucial for biomedical entity linking +because of the large size of knowledge bases. In +this work we use the bi-encoder architecture for +candidate retrieval. Finally, in addition to the in- +KB entity linking task, where you only consider +entities inside the KB, we also consider an out of +KB scenario, where the task is to map mentions to +the augmented set of labels E ∪ NIL, with NIL in- +dicating the absence of a corresponding KB entity. 
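To make the formulation concrete, the sketch below shows the prediction rule for the out-of-KB setting: a mention is linked to the highest-scoring entity unless that score falls below a threshold, in which case it is labelled NIL. This is a minimal Python illustration, not the authors' code; the threshold corresponds to the one tuned on a validation set as described later in Section 5.3.1.

from typing import Dict, Optional

NIL = None  # stand-in label for "no corresponding KB entry"

def link_mention(entity_scores: Dict[str, float],
                 nil_threshold: float) -> Optional[str]:
    # entity_scores: {entity_id: similarity} returned by the candidate retriever
    best_entity = max(entity_scores, key=entity_scores.get)
    if entity_scores[best_entity] < nil_threshold:
        return NIL          # mention mapped into the augmented label set E ∪ {NIL}
    return best_entity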
4 Methods

[Figure 1 depicts an example mention, "We detect a correlation in the expression of these two genes", embedded in a joint space together with its positive entity (Gene Expression: "The process by which information from a gene is used in the synthesis of a product"), a 'hard' negative entity (Expression: "A look on someone's face that conveys a particular emotion") and an 'easy' negative entity (Espresso: "A type of strong black coffee made by forcing steam through ground coffee beans").]

Figure 1: Overview of our proxy-based entity linking method. The mention and entity embeddings are encoded into a joint embedding space. During training, the magnitude of the gradients of the Proxy loss function with respect to the embedding coordinates is a function of the similarity between the mention and the entities (proxies). The gradients are represented by arrows whose widths indicate their magnitude. The adv-labelled dotted arrows are the Fast Gradient Sign Method adversarial perturbations. The blue circle symbolizes the margin δ.

In this section, we review the categorical CE loss, used by current state-of-the-art models, in the context of entity linking (Wu et al., 2020; Zhang and Stratos, 2021). We then compare it to our proposed Proxy-based loss. Finally, we describe
+Drawing inspiration from the proxy-based met- +ric learning losses described above, we formulate +our Proxy-based (Pb) candidate retrieval loss as +follows: +LPb(m, P) = log(1 + exp(−α(s(m, e+) − δ)) ++ log(1 + +� +e−∈P − +exp(α(s(m, e−) + δ)), +(3) +where we use the same notation as in Eq. 1. In +addition, α is a hyperparameter controlling how +strongly positive and negative samples pull and +push each other, and δ is a margin. If α and δ are +large, the model will be strongly penalized for the +positive pair being too far from each other, and +conversely the negative pair for being too close to +each other. If α and δ are small, the model will +receive weaker feedback. The Softplus function, a +smooth approximation of the ReLU, introduces an +additional margin beyond which the model stops +penalising both positive and negative pairs, thus +reducing overfitting. The gradient of our Proxy- +based loss function is given by: +∂LPb +∂s(m, e) = +� +� +� +� +� +� +� +� +� +� +� +−α exp(−αs+) +1 + exp(−αs+), +e = e+ +α exp(αs−) +1 + +� +e−∈P − exp(αs−), +e ∈ P − +(4) +where s+ = s(m, e+) − δ, s− = s(m, e−) + +δ. This gradient reflects the relative hardness of +negative examples, decoupled from the positive +pair, which makes it less sensitive to the choice of +negative sampling scheme. +4.2 +Regularization +Our regularization approach is based on a simple +adversarial training technique, called Fast Gradi- +ent Sign Method (FGSM) (Goodfellow et al., 2015). +The idea of FGSM is to generate adversarial exam- +ples according to the following equation: +xadv = x + ϵ ∗ sign(∇xL(x, y)) +(5) +where x is the original training example, y its +corresponding label, L the loss function that is +minimised during model training, and ϵ a small +number defining the magnitude of the perturbation. + +FGSM applies a small perturbation to the input +example that should not change the label of the re- +sulting example xadv. However, Goodfellow et al. +(2015) demonstrated that even infinitesimal per- +turbations can cause drastic changes to the model +output when carefully designed. This effect is due +to the locally linear nature of neural networks in +combination with the high dimensionality of their +inputs. Moreover, it is the direction, rather than +the magnitude, of the perturbation that matters the +most. In FGSM the direction is determined by +the gradient of the loss function with respect to the +model input - x is pushed in the direction of highest +loss increase given its true label y. +In the context of entity linking task, we are +interested in generating examples adversarial to +the learned metric, in other words hard negative +and hard positive examples for a given mention +m. +To this end, we applied the following per- +turbations to the entity encoder input embeddings +z = input_embed(e): +z− +adv = z− + ϵ ∗ sign(∇z−s(m, e−)) +(6) +z+ +adv = z+ − ϵ ∗ sign(∇z+s(m, e+)) +(7) +where m is the anchor mention and z−, z+ are the +encoder input embeddings of negative and positive +entities e−, e+ correspondingly. +Given N negative entities for a mention m, the +generated adversarial entity embeddings Padv = +{z− +adv_1, . . . , z− +adv_N, z+ +adv} are used as additional +training examples, giving rise to an auxiliary loss +term that encourages the model to be invariant to +local adversarial perturbations. Thus, the final ob- +jective we are trying to minimise becomes: +LPb(m, P) + λLPb(m, Padv) +(8) +where λ is a hyperparameter controlling the relative +contributions of the two losses. 
+5 +Experiments +5.1 +Datasets +MedMentions +This is is a biomedical entity- +linking dataset consisting of over 4,000 PubMed +abstracts (Mohan and Li, 2019). As recommended +by the authors, we use the ST21PV subset, which +has around 200,000 mentions in total. A large num- +ber of mentions in both the validation and test splits +are zero-shot, meaning their ground truth label is +not present in the training data. We do not carry out +any additional preprocessing on the dataset. Finally, +MedMentions +Zero-Shot EL +Train +Val +Test +Train +Val +Test +Mentions +120K +40K +40K +49K +10K +10K +Entities +19K +8K +8K +333K +90K +70K +% Entities seen +100 +57.5 +57.5 +100 +0 +0 +Table 1: Statistics of datasets used. "% Entities seen" +signifies the percentage of ground truth entities seen +during training. +for the knowledge base (KB), we follow the frame- +work in Varma et al. (2021) and use the UMLS +2017AA version filtered by the types present in the +ST21PV subset. The final KB includes approxi- +mately 2.36M entities. +To evaluate our models in the NIL detection +setting, we have created a new dataset based on +MedMentions. In this dataset, we have assigned +mentions corresponding to 11 entity types a NIL +label and removed them from the Knowledge Base. +Details on the dataset statistics and removed entity +types can be found in the Appendix. +Zero-Shot Entity Linking dataset +ZESHEL, a +general domain dataset was constructed by Lo- +geswaran et al. (2019) from Wikias1. It consists +of 16 independent Wikias. The task is to link men- +tions in each document to a Wikia-specific entity +dictionary with provided entity descriptions. The +dataset is zero-shot, meaning there is no overlap in +entities between training, validation and test sets. +5.2 +Input Representation and Model +Architecture +Similarly to Wu et al. (2020); Zhang and Stratos +(2021); Varma et al. (2021) our candidate retriever +is a bi-encoder consisting of two independent +BERT transformers. We use the bi-encoder to en- +code a textual mention and an entity description in- +dependently then obtain a similarity score between +them. +Namely, Given a mention and its surrounding +context τm and an entity τe, we obtain dense +vector representations ym = red(T1(τm)) and +ye = red(T2(τe)), where T1 and T2 are the two +independent transformers of the bi-encoder and +red(·) is a function that reduces the output of a +transformer into a single vector. We use a mean +pooling operation for the function red(·). +As in Wu et al. (2020); Zhang and Stratos (2021); +Varma et al. (2021) we use the dot product to score +the mention ym against an entity vector ye when +1https://wikia.com + +using the CE loss. For our Proxy-based loss we use +cosine similarity. +In this, work, we focus on entity linking by ef- +ficient candidate retrieval, but we also include the +ranker results using the highest scoring candidate +entities in the Appendix, where we also include +more details on entity, mention and context mod- +elling. +5.3 +Training & Evaluation Details +In all our experiments we used the transformer ar- +chitecture (Vaswani et al., 2017) for the encoders. +Namely, we used BERT (Devlin et al., 2019), ini- +tialised with appropriate pre-trained weights: Sap- +BERT (Liu et al., 2021) for MedMentions and +the uncased BERT-base (Devlin et al., 2019) for +ZESHEL. For FGSM regularization, we apply ad- +versarial perturbations to the composite token em- +beddings (i.e. sum of word, position and segment +embeddings) used as input to BERT. 
We apply our +regularization to both Proxy-based and CE. For in- +formation on hyperparameter tuning please refer +to the Appendix. We tune all of our experiments +on the validation set and report results on the test +set. Due to hardware limitations, the training was +conducted on a single V100 GPU machine with 16 +GB of GPU memory. The limited GPU capacity, in +particular, memory, posed a challenge by constrain- +ing us to using a relatively low number of negatives +when training a retriever. +5.3.1 +Candidate Retriever +The retriever model is optimised with the Proxy- +based loss (3) and benchmark CE loss (1) for fair +comparison. We evaluate the retriever on the micro- +averaged recall@1 and recall@64 metrics, where +in our setup recall@1 is equivalent to accuracy. +Here we focus on the recall@1 metric, which is +highly relevant for efficient candidate retrieval mod- +els that do not necessitate running an expensive +cross-encoder for candidate ranking. We use two +negative sampling techniques: (1) Random, where +the negatives are sampled uniformly at random +from all entities in the knowledge base, and (2) +Mixed-p: p percent of the negatives are hard, the +rest are random. This is motivated by the results +shown in Zhang and Stratos (2021). We set the p +to 50%. +Hard negative mining +Retrieving hard nega- +tives requires running the model in the inference +mode over the entire KB. Then, for each mention, +the most similar (i.e. hard) negatives are sampled +according to a scoring function. Here, we use +FAISS (Johnson et al., 2019) for obtaining hard +negatives given a mention and an index of entity +embeddings from the KB. +Running a forward pass over the entire KB at reg- +ular intervals can be costly and time-consuming as +the KB often amounts to millions of entities. More- +over, the computational complexity of retrieving +hard negatives may grow exponentially depending +on the scoring function. For example, the tradition- +ally used scoring function also leveraged in this +work, where the mention and entity are both rep- +resented with a single embedding requires O(me) +approximate nearest neighbour searches, where m +and e are the number of mentions and entities re- +spectively. However, employing an alternative scor- +ing function such as the sum-of-max used in Zhang +and Stratos (2021) which requires comparing a set +of mention embeddings with a set of entity em- +beddings results in O(mexy) where x and y is the +number of mention vector and entity vector embed- +dings. In Zhang and Stratos (2021) x and y are +set to 128, the number of maximum tokens in the +mention and entity input sequence. +This highlights the computational cost of hard +negative mining and underlines the need for both +methods which can work effectively with random +samples as well as more efficient hard negative min- +ing strategies. In this work we propose a method +for the former. +Biomedical Out of Knowledge Base Detection +For the biomedical NIL detection scenario training +proceeds exactly as in the in-KB setting. We train +models with the Proxy-based loss with different +margins, and also a model with the CE loss. In +each case, we use a validation set that includes NIL +mentions to select an appropriate threshold for the +retrieval model. Mentions whose corresponding +top-ranked entity does not achieve this score are +assigned the NIL label. We choose the threshold +that maximises the F1 score for NIL entities in +the validation set. 
We then apply this threshold to +detect NIL mentions in the test set. +6 +Results +We present the results for candidate retrieval and +benchmark our models against suitable methods. +We name our method Proxy-based Entity Linking +(PEL-Pb). We also report the results of a version +of our model which uses the CE (PEL-CE) loss on + +# Neg. +recall@1 +recall@64 +Angell et al. (2021) +- +50.8 +85.3 +Agarwal et al. (2021) +- +72.3 +95.6 +Varma et al. (2021) +100 +71.7 +- +PEL-CE +32 (mixed) +72.1 +95.5 +64 (mixed) +72.1 +95.6 +64 (random) +55.7 +94.0 +PEL-Pb +32 (mixed) +71.6 +93.3 +64 (mixed) +72.6 +95.0 +64 (random) +63.3 +95.9 +PEL-CE + FGSM +32 (mixed) +72.3 +95.5 +PEL-Pb + FGSM +32 (mixed) +72.4 +93.7 +Table 2: Candidate retrieval results on the MedMen- +tions dataset. CE and Pb refers to cross-entropy and +proxy-based losses respectively. All experiments were +run with mixed random and hard negatives “(mixed)", +or only “(random)" negatives. The bold figures repre- +sent the best score for each recall metric. Note that +FGSM PEL variants were only run with 32 negatives +due to GPU memory constraints. +Random +Mixed +recall@1 +recall@64 +recall@1 +recall@64 +Wu et al. (2019)† +- +81.80 +46.5 +84.8 +Agarwal et al. (2021) +38.6 +84.0 +50.4 +85.1 +Ma et al. (2021) +45.4 +90.8 +- +- +Zhang and Stratos (2021) +- +87.62 +- +89.6 +PEL-CE +44.1 +84.8 +52.5 +87.2 +PEL-Pb +48.9 +85.2 +53.1 +86.0 +PEL-CE + FGSM +44.1 +85.2 +53.2 +87.2 +PEL-Pb + FGSM +49.7 +85.6 +54.2 +86.6 +Table 3: Candidate retrieval results on the ZESHEL +dataset. CE and Pb refers to cross-entropy and proxy- +based losses respectively. The negative to positive sam- +ple ratio for all PEL runs is 32. The bold figures repre- +sent the best score for each sampling strategy (random +vs. mixed random and hard). The highlighted figure +represents the best overall score across strategies. †we +use the results reported in Zhang and Stratos (2021) for +random negatives and Ma et al. (2021) for mixed nega- +tives. +all experiments for comparison. +6.1 +MedMentions +Table 2 shows that all approaches using bi-encoder +transformer models strongly outperform the N- +Gram TF-IDF proposed in Angell et al. (2021) for +recall@1 and also recall@64. We also observe the +strong positive effect of including hard negatives +during model training. The effect is particularly +strong for the CE loss, where recall@1 increases by +17% compared with training on random negatives. +We believe that such difference is partly due to the +large size of the KB MedMentions KB, amounting +to 2.36M entities, which contributes to the impor- +tance of hard negative mining. For the Proxy-based +loss, including hard negatives increases recall@1 +by 9%, achieving state-of-the-art performance of +72.6%. Adding FGSM regularisation boosted per- +formance, as can be seen from the experiments with +32 negatives (the largest number of negatives we +could fit into GPU memory when applying FGSM). +However, it did not exceed the performance of the +unregularized model with 64 negative samples. +NIL +All classes incl. NIL +auPR +Precision +Recall +Recall@1 +Recall@64 +Pb (m=0) +83.7 +81.2 +71.0 +72.6 +90.4 +Pb (m=0.01) +84.4 +81.6 +71.5 +72.5 +90.2 +Pb (m=0.05) +85.8 +83.3 +73.5 +72.4 +89.9 +Pb (m=0.1) +87.6 +85.2 +79.2 +69.4 +85.7 +CE +32.3 +31.8 +74.0 +64.4 +76.1 +Table 4: NIL detection results on the MedMentions +dataset. 
auPR, precision and recall are reported exclu- +sively for the NIL class, whereas micro-averaged re- +call@1 and recall@64 are reported for all classes in- +cluding NIL. Pb: Proxy-based with margin m, CE: +Cross-Entropy. +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +Recall +0.2 +0.4 +0.6 +0.8 +1.0 +Precision +Pb (m=0.1) +Pb (m=0.05) +Pb (m=0.01) +Pb (m=0) +CE +Figure 2: Precision Recall curves for NIL detection on +the MedMentions dataset. Pb: Proxy-based with mar- +gin m, CE: Cross-Entropy. +Biomedical Out of Knowledge Base detection +We also evaluated our proposed loss function on +NIL detection. All models trained with the Proxy- +based loss significantly outperform the CE-based +model in terms of both precision and recall (Figure +2). The CE loss does not encourage low scores in +absolute value for negatives examples, but rather +encourages scores that are lower than the scores of +positive examples. As we can see from the results, +CE training fails to assign low scores to NIL men- +tions, as these are out-of-distribution negatives and +thus have not been compared to positive examples +during model training. Our Proxy-based loss does +not suffer from this issue, even with a margin of 0. + +We believe that this is accomplished by the decou- +pling of the positive and negative loss terms, such +that low absolute score values are encouraged for +negative examples. +Furthermore, the higher the Proxy-based margin +the better the model’s performance with respect to +detecting NIL mentions. At the same time, Proxy- +based models with lower margins perform better at +the overall recall metrics (Figure 2). These metrics +are computed with respect to all classes including +the NIL class. Given that the performance differ- +ences among models with different margins are +minimal, a practitioner could choose how to set +the margin considering the trade-off between NIL +detection and overall model performance. To our +knowledge, we are the first to propose a method for +NIL detection using the bi-encoder architecture. +6.2 +Zero-Shot Entity Linking dataset +Based on the candidate retrieval results in Table 3, +we can conclude six key points. (1) Proxy-based +models (Pb) outperform their Cross-Entropy (CE) +counterparts across all considered settings for re- +call@1. In particular, our Proxy-based model using +hard negatives and FGSM regularization achieves +state-of-the-art recall@1 on this dataset. This high- +lights the gain that we get by breaking the depen- +dency between positive and negative pairs. (2) In- +cluding hard negatives always boosts model perfor- +mance. This is particularly evident on the recall@1 +metric. The model trained with CE loss strongly de- +pends on hard negatives, with recall@1 increasing +by 8% compared to training with random negatives. +For the Proxy-based loss the increase is 4%, as +the model already performs competitively when +trained with random negatives. This showcases the +importance of hard negative sampling for the CE +loss. Hard negatives provide the model with much +more meaningful feedback and avoid the threat of +vanishing gradients (Eq. 2). (3) The difference be- +tween Pb and CE models becomes much smaller +for recall@64. Trivially, as k increases, recall@k +for all models will converge towards 1. Addition- +ally, as k increases to above the number of hard +negatives, the model’s ability to distinguish the +hard negatives from the positive will not be seen in +the metric. (4) CE models marginally outperform +Pb models with hard negatives at recall@64. 
Hard +negatives consistently have a larger impact on CE +compared to Pb also at recall@64 (2), while the +benefits of Pb have been nullified as discussed in +(3). (5) Alternative methods leveraging the CE loss +and different model architectures such as MuVER +(Ma et al., 2021) and SOM (Zhang and Stratos, +2021) outperform the bi-encoder based approach +at recall@64. However, both MuVER and SOM +are more complex models tuned for achieving high +recall@64, whereas the main focus of our approach +is high recall@1 in the pursuit of avoiding the ad- +ditional ranking stage. Pb outperforms the only +single stage entity linking model Agarwal et al. +(2021) across the board. (6) FGSM regularization +boosts the results of both Proxy-based and CE mod- +els, demonstrating its promise as a general method +for regularizing the retrieval model. +7 +Discussion and Future Work +We have proposed and evaluated a novel proxy- +based loss for biomedical candidate retrieval. Ad- +ditionally, we have adopted an adversarial regular- +ization technique designed to simulate hard neg- +atives, and shown that both our loss and regular- +ization boost performance on the recall@1 metric. +We have also constructed a biomedical dataset for +NIL detection and demonstrated that our candidate +retrieval model can robustly identify biomedical +NIL entities, while maintaining high overall per- +formance. These are important advances towards +closing the gap between the two-stage approach +that include an expensive cross-encoder and a can- +didate retriever-only setup. +Notably, our work highlights the importance of +hard negative sampling when optimising the can- +didate generator with the CE loss. Random nega- +tive sampling together with CE loss can result in +the problem becoming trivial, for example the ran- +domly sampled negative entity having a different +type. However, accessing hard negative examples +during model training can be challenging, particu- +larly when the knowledge base is large and entity +representations are frequently updated. +Considering this, we recommend to employ our +Proxy-based loss for the candidate retrieval task in +three different scenarios: (1) training with random +negatives, (2) optimising for recall@1, (3) detect- +ing NIL entities. Moreover, we also recommend +leveraging FGSM regularisation in any setup and +both retrieval and ranking tasks. +An interesting approach would be to attempt +to approximate hard negatives without frequent +updates of the entity representations. This could +potentially be done by keeping the entity encoder + +frozen, or exploring alternative relatedness mea- +sures which does not require frequently running +the model over the whole knowledge base. Fi- +nally, there is a plethora of work on proxy-based +(Movshovitz-Attias et al., 2017; Kim et al., 2020) +and pair-based losses (Bromley et al., 1993; Chopra +et al., 2005; Schroff et al., 2015; Dong and Shen, +2018), usually discussed in the computer vision and +metric learning literature. Improving the candidate +retrieval is a crucial step towards high-performing +and efficient entity linking systems that can be eas- +ily applied in real-world settings. +Limitations +There are several limitations of our work. Firstly, +we only demonstrate the advantages of our pro- +posed method when computing hard negatives is +computationally expensive, which is the case with +large knowledge bases and expensive scoring meth- +ods. 
If computing hard negatives is not a bottleneck, +one may use negative sampling with the baseline +CE loss. However, biomedical knowledge bases +typically contain a huge number of entities. Sec- +ondly, in our experiments we were limited to single +GPU machines with at most 16GB of GPU mem- +ory. This prevented us from including more than +64 negatives samples in the standard setup and +32 negative samples when using FGSM regular- +ization, which could potentially be benefit model +performance. Thirdly, we acknowledge that some +comparison to related work is missing, in particu- +lar, Zhang and Stratos (2021). We were not able to +reproduce the results cited in the paper using the +publicly available code. Finally, our work is limited +to proxy-based metric learning losses. More space +could be devoted to the topic of how one could +utilise metric learning more broadly for biomedical +entity linking. We leave this for future work. +Ethics Statement +The BERT-based models fine-tuned in this work +and datasets are publicly available. We will also +make our code as well as the biomedical out of +knowledge base detection dataset publicly avail- +able. +The task of entity linking is often crucial for +downstream applications, such as relation extrac- +tion, hence potential biases at the entity lining stage +can have significant harmful downstream conse- +quences. One source of such biases are the pre- +trained language models fine-tuned in this work. +There is a considerable body of work devoted to +the topic of biases in language models. One way +the entity linking systems can be particularly harm- +ful is when they commit or propagate errors in +the language models, knowledge bases, mention +detection across certain populations such as races +or genders. Because of the high ambiguity across +biomedical mentions and entities in the knowledge +base, it is important that the users investigate the +output prediction of the entity linking system and +often take is a suggestion, rather than gold standard. +Finally, we highlight that linking the entity to its +entry in the knowledge base and out of knowledge +base detection can be analogous to surveillance +and tracking in the computer vision domain, which +comes with substantial ethical considerations. +Acknowledgements +We thank Dane Corneil, Georgiana Neculae and +Juha Iso-Sipilä for helpful feedbacks and the anony- +mous reviewers for constructive comments on the +manuscript. +References +Dhruv Agarwal, Rico Angell, Nicholas Monath, and +Andrew McCallum. 2021. +Entity linking and dis- +covery via arborescence-based supervised clustering. +arXiv preprint arXiv:2109.01242. +Rico Angell, Nicholas Monath, Sunil Mohan, Nishant +Yadav, and Andrew McCallum. 2021. +Clustering- +based inference for biomedical entity linking. +In +Proceedings of the 2021 Conference of the North +American Chapter of the Association for Computa- +tional Linguistics: Human Language Technologies, +pages 2598–2608, Online. Association for Compu- +tational Linguistics. +Rajarshi Bhowmik, Karl Stratos, and Gerard de Melo. +2021. Fast and effective biomedical entity linking +using a dual encoder. ArXiv, abs/2103.05028. +Olivier Bodenreider. 2004. The Unified Medical Lan- +guage System (UMLS): integrating biomedical ter- +minology. +Nucleic acids research, 32(Database +issue):D267–D270. +Jane Bromley, Isabelle Guyon, Yann LeCun, Eduard +Säckinger, and Roopak Shah. 1993. Signature ver- +ification using a "siamese" time delay neural net- +work. 
In Proceedings of the 6th International Con- +ference on Neural Information Processing Systems, +NIPS’93, page 737–744, San Francisco, CA, USA. +Morgan Kaufmann Publishers Inc. +Razvan Bunescu and Marius Pasca. 2006. Using en- +cyclopedic knowledge for named entity disambigua- + +tion. Association for Computational Linguistics, Eu- +ropean Chapter. +S. Chopra, R. Hadsell, and Y. LeCun. 2005. Learning +a similarity metric discriminatively, with application +to face verification. In 2005 IEEE Computer Society +Conference on Computer Vision and Pattern Recog- +nition (CVPR’05), volume 1, pages 539–546 vol. 1. +Jacob Devlin, Ming-Wei Chang, Kenton Lee, and +Kristina Toutanova. 2019. +BERT: Pre-training of +deep bidirectional transformers for language under- +standing. +In Proceedings of the 2019 Conference +of the North American Chapter of the Association +for Computational Linguistics: Human Language +Technologies, Volume 1 (Long and Short Papers), +pages 4171–4186, Minneapolis, Minnesota. Associ- +ation for Computational Linguistics. +Xingping Dong and Jianbing Shen. 2018. Triplet loss +in siamese network for object tracking. In Proceed- +ings of the European Conference on Computer Vi- +sion (ECCV). +Daniel +Gillick, +Sayali +Kulkarni, +Larry +Lansing, +Alessandro Presta, Jason Baldridge, Eugene Ie, and +Diego Garcia-Olano. 2019. Learning dense repre- +sentations for entity retrieval. CoNLL 2019 - 23rd +Conference on Computational Natural Language +Learning, Proceedings of the Conference, pages +528–537. +Ian Goodfellow, +Jonathon Shlens, +and Christian +Szegedy. 2015. Explaining and harnessing adversar- +ial examples. In International Conference on Learn- +ing Representations (ICLR). +Swapna Gottipati and Jing Jiang. 2011. Linking enti- +ties to a knowledge base with query expansion. As- +sociation for Computational Linguistics. +Raia Hadsell, Sumit Chopra, and Yann LeCun. 2006. +Dimensionality reduction by learning an invariant +mapping. In 2006 IEEE Computer Society Confer- +ence on Computer Vision and Pattern Recognition +(CVPR’06), volume 2, pages 1735–1742. IEEE. +Samuel Humeau, Kurt Shuster, Marie-Anne Lachaux, +and J. Weston. 2019. Poly-encoders: Transformer +architectures and pre-training strategies for fast and +accurate multi-sentence scoring. arXiv: Computa- +tion and Language. +Jeff Johnson, Matthijs Douze, and Hervé Jégou. 2019. +Billion-scale similarity search with GPUs. +IEEE +Transactions on Big Data, 7(3):535–547. +Omar Khattab and Matei Zaharia. 2020. ColBERT: Ef- +ficient and Effective Passage Search via Contextual- +ized Late Interaction over BERT, page 39–48. As- +sociation for Computing Machinery, New York, NY, +USA. +Sungyeon Kim, Dongwon Kim, Minsu Cho, and Suha +Kwak. 2020. +Proxy anchor loss for deep metric +learning. In Proceedings of the IEEE/CVF Confer- +ence on Computer Vision and Pattern Recognition +(CVPR). +Nikolaos +Kolitsas, +Octavian-Eugen +Ganea, +and +Thomas Hofmann. 2018. End-to-end neural entity +linking. arXiv preprint arXiv:1808.07699. +Nevena Lazic, Amarnag Subramanya, Michael Ring- +gaard, and Fernando Pereira. 2015. +Plato: A se- +lective context model for entity resolution. Transac- +tions of the Association for Computational Linguis- +tics, 3:503–515. +Fangyu Liu, Ehsan Shareghi, Zaiqiao Meng, Marco +Basaldella, and Nigel Collier. 2021. Self-alignment +pretraining for biomedical entity representations. 
In +Proceedings of the 2021 Conference of the North +American Chapter of the Association for Computa- +tional Linguistics: Human Language Technologies, +pages 4228–4238, Online. Association for Compu- +tational Linguistics. +Lajanugen Logeswaran, Ming-Wei Chang, Kenton Lee, +Kristina Toutanova, Jacob Devlin, and Honglak Lee. +2019. Zero-shot entity linking by reading entity de- +scriptions. In Proceedings of the 57th Annual Meet- +ing of the Association for Computational Linguistics, +pages 3449–3460, Florence, Italy. Association for +Computational Linguistics. +Yi Luan, Jacob Eisenstein, Kristina Toutanova, and +Michael Collins. 2021. +Sparse, dense, and atten- +tional representations for text retrieval. +Transac- +tions of the Association for Computational Linguis- +tics, 9:329–345. +Xinyin Ma, Yong Jiang, Nguyen Bach, Tao Wang, +Zhongqiang Huang, Fei Huang, and Weiming Lu. +2021. MuVER: Improving first-stage entity retrieval +with multi-view entity representations. In Proceed- +ings of the 2021 Conference on Empirical Methods +in Natural Language Processing, pages 2617–2624, +Online and Punta Cana, Dominican Republic. Asso- +ciation for Computational Linguistics. +Takeru Miyato, +Andrew M Dai, +and Ian Good- +fellow. 2016. +Adversarial training methods for +semi-supervised text classification. arXiv preprint +arXiv:1605.07725. +Sunil Mohan and Donghui Li. 2019. Medmentions: A +large biomedical corpus annotated with UMLS con- +cepts. CoRR, abs/1902.09476. +Jose G Moreno, Romaric Besançon, Romain Beau- +mont, Eva D’hondt, Anne-Laure Ligozat, Sophie +Rosset, Xavier Tannier, and Brigitte Grau. 2017. +Combining word and entity embeddings for entity +linking. +In European Semantic Web Conference, +pages 337–352. Springer. +Yair Movshovitz-Attias, Alexander Toshev, Thomas K. +Leung, Sergey Ioffe, and Saurabh Singh. 2017. No +fuss distance metric learning using proxies. In Pro- +ceedings of the IEEE International Conference on +Computer Vision (ICCV). + +Lin Pan, Chung-Wei Hang, Avirup Sil, Saloni Pot- +dar, and Mo Yu. 2021. Improved text classification +via contrastive adversarial training. arXiv preprint +arXiv:2107.10137. +Florian Schroff, Dmitry Kalenichenko, and James +Philbin. 2015. +Facenet: A unified embedding for +face recognition and clustering. In 2015 IEEE Con- +ference on Computer Vision and Pattern Recognition +(CVPR), pages 815–823. +Ozge Sevgili, Artem Shelmanov, Mikhail Arkhipov, +Alexander Panchenko, and Chris Biemann. 2020. +Neural entity linking: A survey of models based on +deep learning. arXiv preprint arXiv:2006.00575. +Wei Shen, Jianyong Wang, and Jiawei Han. 2014. En- +tity linking with a knowledge base: Issues, tech- +niques, and solutions. IEEE Transactions on Knowl- +edge and Data Engineering, 27(2):443–460. +Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, +Joan Bruna, Dumitru Erhan, Ian Goodfellow, and +Rob Fergus. 2013. Intriguing properties of neural +networks. arXiv preprint arXiv:1312.6199. +Chen-Tse Tsai and Dan Roth. 2016. +Cross-lingual +wikification using multilingual embeddings. In Pro- +ceedings of the 2016 Conference of the North Amer- +ican Chapter of the Association for Computational +Linguistics: Human Language Technologies, pages +589–598. +Maya Varma, Laurel Orr, Sen Wu, Megan Leszczynski, +Xiao Ling, and Christopher Ré. 2021. Cross-domain +data integration for named entity disambiguation in +biomedical text. In Findings of the Association for +Computational Linguistics: EMNLP 2021, pages +4566–4575, Punta Cana, Dominican Republic. 
Association for Computational Linguistics.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Łukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS'17, pages 6000–6010, Red Hook, NY, USA. Curran Associates Inc.
Ledell Wu, Fabio Petroni, Martin Josifoski, Sebastian Riedel, and Luke Zettlemoyer. 2019. Zero-shot Entity Linking with Dense Entity Retrieval.
Ledell Wu, Fabio Petroni, Martin Josifoski, Sebastian Riedel, and Luke Zettlemoyer. 2020. Scalable zero-shot entity linking with dense entity retrieval. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6397–6407, Online. Association for Computational Linguistics.
Wenzheng Zhang and Karl Stratos. 2021. Understanding hard negatives in noise contrastive estimation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1090–1101, Online. Association for Computational Linguistics.

Appendices

A Context and Mention Modelling

We represent a mention and its surrounding context, τm, as a sequence of word piece tokens

[CLS] ctxtl [Ms] mention [Me] ctxtr [SEP]

where mention, ctxtl and ctxtr are the word-piece tokens of the mention, the left context and the right context, and [Ms] and [Me] are special tokens marking the start and end of a mention respectively.
Due to the differences in available data, we represent entities differently for ZESHEL and MedMentions. On ZESHEL, we represent entities with a sequence of word piece tokens

[CLS] title [ENT] description [SEP]

where [ENT] is a special separator token. In contrast, when training on the MedMentions dataset we represent an entity by the sequence

[CLS] title [SEP] types [SEP] description [SEP]

Descriptions of entities were sourced from UMLS. A minimal sketch of how these sequences can be assembled is given below, after Table 5 and Figure 3.

B Candidate ranker setup and results

To evaluate the impact of our candidate retriever model on the downstream task of candidate ranking, we also conducted ranking experiments on both datasets.

Model | # Candidates | Ranker | Accuracy
ZESHEL
Wu et al. (2020) | 64 | Base | 61.3
Wu et al. (2020) | 64 | Large | 63.0
Zhang and Stratos (2021) | 64 | Base | 66.7
Zhang and Stratos (2021) | 64 | Large | 67.1
PEL-Pb | 16 | Base | 62.8
PEL-Pb + FGSM | 16 | Base | 64.6
MedMentions
Bhowmik et al. (2021)† | - | - | 68.4
Angell et al. (2021) | - | - | 72.8
Varma et al. (2021) | 10 | Base | 74.6
PEL-Pb | 16 | Base | 74.0
PEL-Pb + FGSM | 16 | Base | 74.6
Angell et al. (2021) + post-processing | - | - | 74.1
Varma et al. (2021) + post-processing | 10 | Base | 74.8

Table 5: Ranker results on the ZESHEL and MedMentions datasets. † uses the full MedMentions dataset, rather than the ST21PV subset recommended by the MedMentions authors and used by the other models reported in the table.

Figure 3: Comparison of smoothed gradient norms over training steps using two losses, CE and Proxy-based. The left plot visualizes the smoothed gradient norm when using random negatives, and the right one when using mixed-50% negatives. All experiments were conducted on ZESHEL using 32 negatives.
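To make the input formats of Appendix A concrete, the following is a minimal sketch of how the mention and entity sequences could be assembled. It assumes a HuggingFace-style BERT tokenizer; the tokenizer choice, maximum length and helper names are illustrative assumptions, not taken from the paper's code.

from transformers import BertTokenizerFast

# Illustrative sketch only: the paper's released implementation may differ.
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
# Register the mention and entity markers as special tokens.
# (The encoder's embedding matrix would also need resizing; omitted here.)
tokenizer.add_special_tokens({"additional_special_tokens": ["[Ms]", "[Me]", "[ENT]"]})

MAX_LEN = 128  # bi-encoder input length from Table 6

def mention_input(ctxt_left, mention, ctxt_right):
    # [CLS] ctxt_l [Ms] mention [Me] ctxt_r [SEP]
    text = f"{ctxt_left} [Ms] {mention} [Me] {ctxt_right}"
    return tokenizer(text, truncation=True, max_length=MAX_LEN)

def zeshel_entity_input(title, description):
    # [CLS] title [ENT] description [SEP]
    return tokenizer(f"{title} [ENT] {description}",
                     truncation=True, max_length=MAX_LEN)

def medmentions_entity_input(title, types, description):
    # [CLS] title [SEP] types [SEP] description [SEP]
    return tokenizer(f"{title} [SEP] {types} [SEP] {description}",
                     truncation=True, max_length=MAX_LEN)

During training, the mention sequence is fed to one encoder of the bi-encoder and the entity sequence to the other, and their pooled outputs are compared with the similarity function described in Section 5.2.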
Training & Evaluation setup  Similarly to related work (Logeswaran et al., 2019; Wu et al., 2020; Zhang and Stratos, 2021), the highest scoring candidate entities from the candidate retriever are passed to a ranker, which is a cross-encoder consisting of one BERT transformer. The cross-encoder (Logeswaran et al., 2019) is used to select the best entity out of the candidate pool. It takes as input τm,e, the concatenation of the mention/context and entity representations τm and τe. We then obtain a dense vector representation for a mention-entity pair, ym,e = red(Tcross(τm,e)), where Tcross is the BERT transformer of the cross-encoder and red(·) is a mean pooling function that takes the mean over the input token embeddings. Entity candidates are scored by applying a linear layer, scross(m, e) = ym,eW (a minimal sketch of this scoring step is given further below).
We pick the best performing retrieval model on recall@16 and use it to retrieve the top 16 candidate entities for each mention. As the number of candidate entities is relatively low, we do not perform negative sampling and optimise the cross-encoder with the CE loss (Eq. 1). We report the micro-averaged unnormalized accuracy on the MedMentions dataset and the macro-averaged unnormalized accuracy on the ZESHEL dataset, in line with prior work (Zhang and Stratos, 2021; Wu et al., 2020). The results are shown in Table 5.

Results  In Table 5 we can observe the downstream effect of having a candidate generator model with high recall@1 performance. On ZESHEL, a cross-encoder trained with the top 16 candidates from our best performing candidate generator achieved higher accuracy than Wu et al. (2020), who used the top 64 candidates. Moreover, as with candidate retrieval, FGSM boosts performance. For completeness, we have also included the state-of-the-art results from Zhang and Stratos (2021), who used 64 candidates and a larger BERT model in the cross-encoder. In our experiments we were limited to a single GPU with 16 GB of memory, which restricted us to a maximum of 16 candidates. We strongly believe that including more than 16 candidates would further boost the performance of our method.
On MedMentions, a cross-encoder trained with the top 16 candidates from our best performing candidate generator model achieved a competitive accuracy of 74.0%. The accuracy further increased to 74.6% when adding FGSM regularisation, coming close to the state-of-the-art performance of Varma et al. (2021), which includes additional post-processing.

C Training details

The hyperparameters used for the experiments are listed in Table 6. We use a single NVIDIA V100 GPU with 16 GB of GPU memory for all model trainings.

D Biomedical Out of Knowledge Base dataset details

We constructed the OKB dataset by replacing the label of a set of mentions from the MedMentions corpus (Mohan and Li, 2019) with the NIL class. Namely, we pick the mentions belonging to 11 types: Mental Process, Health Care Related Organization, Element Ion or Isotope, Medical Device, Health Care Activity, Diagnostic Procedure, Professional or Occupational Group, Laboratory Procedure, Regulation or Law, Organization, Professional Society.
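As referenced in Appendix B above, the following is a minimal sketch of the cross-encoder scoring step scross(m, e) = ym,eW, assuming PyTorch and a HuggingFace-style BERT. Mean pooling over the token embeddings stands in for red(·); the class and argument names are illustrative, not the paper's implementation.

import torch.nn as nn
from transformers import BertModel

class CrossEncoderRanker(nn.Module):
    """Scores a concatenated mention/context + entity sequence with a single BERT."""

    def __init__(self, model_name="bert-base-uncased"):
        super().__init__()
        self.bert = BertModel.from_pretrained(model_name)
        # The linear layer W applied to the pooled representation y_{m,e}.
        self.score = nn.Linear(self.bert.config.hidden_size, 1)

    def forward(self, input_ids, attention_mask):
        # input_ids encodes tau_{m,e}: the mention/context tokens concatenated
        # with the candidate entity tokens (up to 256 word pieces, Table 6).
        hidden = self.bert(input_ids=input_ids,
                           attention_mask=attention_mask).last_hidden_state
        mask = attention_mask.unsqueeze(-1).float()
        y_me = (hidden * mask).sum(dim=1) / mask.sum(dim=1)  # red(.): mean pooling
        return self.score(y_me).squeeze(-1)  # scalar score per mention-entity pair

The candidate with the highest score among the 16 retrieved entities is selected, and the scores over the candidate set are optimised with the CE loss as described above.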
The final OKB subset includes approximately 24K mentions and 3K unique entities.
To ensure that the OKB dataset does not suffer from easy inferences and allows us to evaluate model performance, we ensured that the zero-shot distribution of the OKB mentions and types across the train/validation/test split was in line with the zero-shot distribution of mentions and types in the whole dataset. Additionally, we verified that there is no significant overlap between mention surface forms across the splits. Moreover, we looked at the length of the entity descriptions used to create entity representations, checking that the statistics of the OKB mentions' entity representations are similar to the statistics computed on the whole dataset.

Param | Bi-encoder | Cross-Encoder
Input sequence length | 128 | 256
learning rate | 1e-5 | 2e-5
warmup proportion | 0.25 | 0.2
eps | 1e-6 | 1e-6
gradient clipping value | 1.0 | 1.0
effective batch size | 32 | 4
epochs | 7 | 5
learning rate scheduler | linear | linear
optimiser | AdamW | AdamW
α | 32 | -
δ | 0.0 | -
FGSM λ | 1 | 1
FGSM ϵ | 0.01 | 0.01

Table 6: Learning parameters for the bi-encoder and cross-encoder.

E Gradient norm analysis

Split | Train | Dev | Test
Mentions | 14K | 4.8K | 4.7K
Entities | 2.2K | 1.1K | 1.1K
% Entities seen | 100 | 57.7 | 57.5

Table 7: Statistics of the OKB MedMentions subset.

Figure 3 shows the behaviour of the gradient ℓ2 norm for both losses. We can see that for both random and mixed negatives, the norm of the Proxy-based loss has considerably lower variance. This is particularly visible when using the mixed negatives.

diff --git a/Q9FQT4oBgHgl3EQfaDZD/content/tmp_files/load_file.txt b/Q9FQT4oBgHgl3EQfaDZD/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2dfea0ae8a641c0d60bdc8039384442226eadc5f
--- /dev/null
+++ b/Q9FQT4oBgHgl3EQfaDZD/content/tmp_files/load_file.txt
@@ -0,0 +1,769 @@
+filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf,len=768
+page_content='Proxy-based Zero-Shot Entity Linking by Effective Candidate Retrieval Maciej Wiatrak1∗, Eirini Arvaniti1, Angus Brayne1, Jonas Vetterle1, 2, Aaron Sim1 1BenevolentAI 2Moonfire Ventures London, United Kingdom {maciej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'}
+page_content='wiatrak, eirini.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'}
+page_content='arvaniti, angus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'}
+page_content='brayne, aaron.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'}
+page_content='sim}@benevolent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'}
+page_content='ai jonas@moonfire.'
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='com Abstract A recent advancement in the domain of biomedical Entity Linking is the development of powerful two-stage algorithms – an initial candidate retrieval stage that generates a short- list of entities for each mention, followed by a candidate ranking stage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' However, the ef- fectiveness of both stages are inextricably de- pendent on computationally expensive compo- nents.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Specifically, in candidate retrieval via dense representation retrieval it is important to have hard negative samples, which require repeated forward passes and nearest neigh- bour searches across the entire entity label set throughout training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In this work, we show that pairing a proxy-based metric learning loss with an adversarial regularizer provides an ef- ficient alternative to hard negative sampling in the candidate retrieval stage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In particular, we show competitive performance on the re- call@1 metric, thereby providing the option to leave out the expensive candidate ranking step.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Finally, we demonstrate how the model can be used in a zero-shot setting to discover out of knowledge base biomedical entities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 1 Introduction The defining challenge in biomedical Entity Link- ing (EL) is performing classification over a large number of entity labels with limited availability of labelled mention data, in a constantly evolv- ing knowledge base.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' For instance, while the Uni- fied Medical Language System (UMLS) knowl- edge base (Bodenreider, 2004) contains millions of unique entity labels, the EL training data in the biomedical domain as a whole is notoriously scarce, particularly when compared to the general domain – Wikipedia, for instance, is powerful as both a Knowledge base and a source of matching entities and mentions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Furthermore, biomedical knowledge bases are evolving rapidly with new entities be- ing added constantly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Given this knowledge base ∗ Corresponding author.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' evolution and scarcity of training data it is crucial that biomedical entity linking systems can scale efficiently to large entity sets, and can discover or discern entities outside of the knowledge base and training data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Recent methods in the general entity linking do- main (Logeswaran et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020) address the data issue with zero-shot entity linking systems that use entity descriptions to form en- tity representations and generalise to entities with- out mentions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' A particularly powerful architecture was initially proposed by Humeau et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2019) and further improved by Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' It con- sists of a two-stage approach: 1) candidate retrieval in a dense space performed by a bi-encoder (Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020) which independently embeds the entity mention and its description, and 2) candidate rank- ing performed by a cross-encoder which attends across both the mention and entity description (Lo- geswaran et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In this work we focus on the former, which is traditionally optimised with the cross-entropy (CE) loss and aims to maximise the similarity between the entity mention and its description relative to the similarities of incorrect mention-description pairs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In practice, the large number of knowledge base entities necessitates the use of negative sampling to avoid the computa- tional burden of comparing each mention to all of the entity descriptions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' However, if the sam- pled distribution of negatives is not reflective of the model distribution, the performance may be poor.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Recently, Zhang and Stratos (2021) showed that using hard negatives - the highest scoring incorrect examples - results in bias reduction through better approximation of the model distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Collect- ing hard negatives is computationally expensive, as it requires periodically performing inference and retrieving approximate nearest neighbours for each mention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' At the ranking stage, negative sampling is not required, as the number of candidates usually does arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='13318v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='LG] 30 Jan 2023 not exceed 64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' However, the state-of-the-art cross- encoder model used for ranking is very expen- sive to run, scaling quadratically with the input sequence length.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' This highlights the need for ef- ficient and performant candidate retrieval models capable of disambiguating mentions without the need for the expensive ranking step.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In this paper, we propose and evaluate a novel loss for the candidate retrieval model, which breaks the dependency between the positive and nega- tive pairs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Our contributions are: (1) a novel loss which significantly outperforms the benchmark cross-entropy loss on the candidate retrieval task when using random negatives, and performs com- petitively when using hard negatives.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2) We design and apply an adversarial regularization method, based on the Fast Gradient Sign Method (Good- fellow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2015), which is designed to simulate hard negative samples without expensively mining them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (3) We construct a biomedical dataset for out of knowledge base detection evaluation using the MedMentions corpus and show that our model can robustly identify mentions that lack a correspond- ing entry in the knowledge base, while maintaining high performance on the retrieval task.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Our main testing ground is the biomedical en- tity linking dataset MedMentions (Mohan and Li, 2019), which utilizes UMLS as its knowledge base.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Additionally, to confirm that our method works also in the general, non-biomedical domain, we evalu- ate it on the Zero-Shot Entity Linking (ZESHEL) dataset proposed in Logeswaran et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We focus on the retrieval task with the recall@1 metric, because we are aiming to predict the entity directly without requiring the additional expensive ranking stage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Our results show that both the proposed loss and regularization improve performance, achieving state-of-the-art results on recall@1 and competitive performance on recall@64 on both datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Fi- nally, we demonstrate that our model can robustly identify biomedical out of knowledge base enti- ties, without requiring any changes to the training procedure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2 Related Work Zero-Shot Entity Linking There is a plethora of work on zero-shot entity linking methods lever- aging the bi-encoder architecture (Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020) for candidate retrieval.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' These include novel scoring functions between the input and the label (Humeau et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Luan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2021;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Khattab and Zaharia, 2020), cross-domain pretraining methods (Varma et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2021), training and inference optimisation techniques (Bhowmik et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2021) and effective en- tity representation methods (Ma et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2021).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Our work instead focuses on optimising the candidate retriever’s loss function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The impact of hard negatives on the entity link- ing model performance has also been investigated (Gillick et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Zhang and Stratos, 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' No- tably, Zhang and Stratos (2021) develop analytical tools to explain the role of hard negatives and evalu- ate their model on the zero-shot entity linking task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We draw on this work, but move away from the CE loss towards a novel contrastive proxy-based loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Finally, there is a body of work on zero-shot en- tity linking in the biomedical domain using cluster- ing (Angell et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2021;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Agarwal et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Our method does not consider the affinities between mentions directly and links them independently.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Therefore, we do not study entity discovery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' An important aspect of biomedical entity linking systems is the detection of “unlinkable” mentions that lack a corresponding entry in the Knowledge Base - referred to as NIL detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Methods for this task can be grouped into four main strategies (Shen et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2014;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Sevgili et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020): (1) label a mention as NIL when the corresponding candi- date retriever does not return any candidate entities (Tsai and Roth, 2016), (2) assign the NIL label to mentions whose corresponding top-ranked entity does not exceed some score threshold (Bunescu and Pasca, 2006;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Gottipati and Jiang, 2011;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Lazic et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2015), (3) train a classifier that predicts whether the top-ranked entity for a given mention is correct (Moreno et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2017), (4) explicitly introduce a NIL class to the candidate ranking model (Kolitsas et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' A downside of the final approach is that knowledge of the NIL mention distribution is required at training time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In this work we tune a NIL score threshold (2) on a validation set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Detect- ing unlinkable mentions is particularly important in the biomedical domain, where the knowledge bases are rapidly evolving.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Proxy-based Losses State-of-the-art entity link- ing models such as BLINK (Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020) lever- age metric learning loss during training to make mentions similar to its assigned entity representa- tions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Metric learning losses could be divided into two categories, pair-based and proxy-based losses (Kim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Pair-based losses can lever- age semantic relations between data points, here mentions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' However, training them can be highly computationally expensive.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' On the other hand, proxy-based losses are significantly less compu- tationally complex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' This is done by establishing a proxy for each class and trying to increase the sim- ilarity between data points and its assigned prox- ies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Therefore, avoiding comparing the mentions to each other in favour of comparing the mentions to their proxies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We draw heavily on proxy-based losses (Movshovitz-Attias et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Kim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020) from metric learning by treating entity de- scriptions as the proxies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We establish a proxy for each entity, creating mention-proxy (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' en- tity) pairs, and optimise the model to embed the mention close to its assigned proxy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The loss pro- posed here is similar to the Proxy-NCA loss of Movshovitz-Attias et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Our modification is the use of the Softplus function, similar to Kim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2020), to avoid a vanishing gradient for the true mention-proxy pair.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Adversarial Regularization Entity linking sys- tems often rely on careful mining of hard nega- tive examples to boost their performance (Gillick et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Zhang and Stratos, 2021) at the ex- pense of increased computational complexity.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The model needs update hard negatives for each men- tion periodically.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' A potential alternative to hard negative mining is training on adversarial exam- ples (Szegedy et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2013;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Goodfellow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2015) synthetic data points designed to induce the model to making incorrect predictions, such that they are more challenging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Adversarial training can be seen as data augmentation and can help reduce overfit- ting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Goodfellow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2015) introduced a simple method for generating adversarial examples, called Fast Gradient Sign Method (FGSM), which we build upon in this work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' FGSM creates adversarial examples by applying small perturbations to the original inputs - often the word embeddings for NLP problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' FGSM has been used successfully as a regulariser in supervised and semi-supervised NLP tasks (Miyato et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Pan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Here, we follow a similar approach and use FGSM to augment our training pairs with adversarial posi- tive and negative examples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 3 Task formulation In the Entity Linking task we are provided with a list of documents D ∈ D, where each document has a set of mentions MD = {m1, m2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' , mND}.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The task is to link each mention mi to an entity ei, where each entity belongs to the Knowledge Base (KB) E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In this work we focus specifically on the problem of biomedical zero-shot entity link- ing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The setup for the zero-shot task is the same as for entity linking introduced above, except that the set of entities present in the test set is not present in the training set, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Etrain ∩ Etest = ∅ with Etrain ∪ Etest = E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We focus specifically on the Candidate Retrieval task, where the goal is given a mention mi, reduce the pool of potential candi- date entities from a KB to a smaller subset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Candi- date retrieval is crucial for biomedical entity linking because of the large size of knowledge bases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In this work we use the bi-encoder architecture for candidate retrieval.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Finally, in addition to the in- KB entity linking task, where you only consider entities inside the KB, we also consider an out of KB scenario, where the task is to map mentions to the augmented set of labels E ∪ NIL, with NIL in- dicating the absence of a corresponding KB entity.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='Methods ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='adv ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='adv ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='adv ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='adv ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='“We detect a correlation in the ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='expression of these two genes” ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='Mention ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='Espresso ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='“A type of strong black coffee ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='made by forcing steam through ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='ground coffee beans” ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='Entity - ‘Easy’ negative ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='Expression ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content="“A look on someone's face that " metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='conveys a particular emotion” ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='Entity - ‘Hard’ negative ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='Gene Expression ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='“The process by which informa- ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='tion from a gene is used in the ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='synthesis of a product” ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='Entity - Positive ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='Figure 1: Overview of our proxy-based entity link- ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='ing method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The mention and entity embeddings are encoded into a joint embedding space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' During train- ing, the magnitude of the gradients of the Proxy loss function with respect to the embedding coordinates is a function of the similarity between the mention and the entities (proxies).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The gradients are represented by arrows whose widths indicate their magnitude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The adv-labelled dotted arrows are the Fast Gradient Sign Method adversarial perturbations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The blue circle sym- bolizes the margin δ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In this section, we review the categorical CE loss, used by current state-of-the-art models, in the context of entity linking (Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Zhang and Stratos, 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We then compare it to our proposed Proxy-based loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Finally, we describe and motivate our regularization approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='1 Loss Given a set of data points corresponding to mention representations m ∈ M and to a set of proxies corresponding to entities e ∈ E, the categorical CE loss is defined as: LCE(m, P) := − log � exp(s(m, e+)) � e∈P exp(s(m, e)) � , (1) where s(·, ·) denotes a similarity function (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='g.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' co- sine similarity or dot product), e+ is the positive proxy for mention representation m, P − is a set of negative proxies used as negative samples, and P = {e+} ∪ P −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The gradient of the CE loss with respect to s(m, e) is given by: ∂LCE ∂s(m, e) = � � � � � � � � � −1 + exp(s(m, e+)) � e∈P exp(s(m, e)), e = e+ exp(s(m, e−)) � e∈P exp(s(m, e)), e ∈ P − (2) In practice training is performed with negative sampling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' If the negatives are sampled randomly, often the exponential term for the positive entity is much larger than that of the negative samples and the gradients vanish.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' When s(m, e+) ≫ s(m, e−) ∀e− ∈ P − then ∂LCE/∂s(m, e) → 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' This behaviour is desirable when training with the full distribution of negative pairs, but stifles learn- ing in the noisier sampling setup.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' A common ap- proach is the use of hard negatives (Gillick et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Zhang and Stratos, 2021), which increases performance over training with random negatives at the cost of increased computational complexity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' On the other hand, contrastive metric learn- ing losses (Bromley et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 1993;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Chopra et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2005;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Hadsell et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2006) alleviate the vanish- ing gradients problem by decoupling the positive and negative loss terms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Proxy-based contrastive losses, such as Proxy-NCA (Movshovitz-Attias et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2017), aim to increase the similarity between a data point x and its assigned proxy e+, while decreasing the similarity between x and its nega- tive proxies e− ∈ P −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' As demonstrated in (Kim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020), a downside of Proxy-NCA is that the scale of its gradient is constant for positive samples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' This issue is alleviated by the Proxy Anchor loss (Kim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020), whose gradient reflects the rel- ative hardness of both positive and negative pairs, resulting in improved model performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Drawing inspiration from the proxy-based met- ric learning losses described above, we formulate our Proxy-based (Pb) candidate retrieval loss as follows: LPb(m, P) = log(1 + exp(−α(s(m, e+) − δ)) + log(1 + � e−∈P − exp(α(s(m, e−) + δ)), (3) where we use the same notation as in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In addition, α is a hyperparameter controlling how strongly positive and negative samples pull and push each other, and δ is a margin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' If α and δ are large, the model will be strongly penalized for the positive pair being too far from each other, and conversely the negative pair for being too close to each other.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' If α and δ are small, the model will receive weaker feedback.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The Softplus function, a smooth approximation of the ReLU, introduces an additional margin beyond which the model stops penalising both positive and negative pairs, thus reducing overfitting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The gradient of our Proxy- based loss function is given by: ∂LPb ∂s(m, e) = � � � � � � � � � � � −α exp(−αs+) 1 + exp(−αs+), e = e+ α exp(αs−) 1 + � e−∈P − exp(αs−), e ∈ P − (4) where s+ = s(m, e+) − δ, s− = s(m, e−) + δ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' This gradient reflects the relative hardness of negative examples, decoupled from the positive pair, which makes it less sensitive to the choice of negative sampling scheme.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='2 Regularization Our regularization approach is based on a simple adversarial training technique, called Fast Gradi- ent Sign Method (FGSM) (Goodfellow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The idea of FGSM is to generate adversarial exam- ples according to the following equation: xadv = x + ϵ ∗ sign(∇xL(x, y)) (5) where x is the original training example, y its corresponding label, L the loss function that is minimised during model training, and ϵ a small number defining the magnitude of the perturbation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' FGSM applies a small perturbation to the input example that should not change the label of the re- sulting example xadv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' However, Goodfellow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2015) demonstrated that even infinitesimal per- turbations can cause drastic changes to the model output when carefully designed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' This effect is due to the locally linear nature of neural networks in combination with the high dimensionality of their inputs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Moreover, it is the direction, rather than the magnitude, of the perturbation that matters the most.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In FGSM the direction is determined by the gradient of the loss function with respect to the model input - x is pushed in the direction of highest loss increase given its true label y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In the context of entity linking task, we are interested in generating examples adversarial to the learned metric, in other words hard negative and hard positive examples for a given mention m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' To this end, we applied the following per- turbations to the entity encoder input embeddings z = input_embed(e): z− adv = z− + ϵ ∗ sign(∇z−s(m, e−)) (6) z+ adv = z+ − ϵ ∗ sign(∇z+s(m, e+)) (7) where m is the anchor mention and z−, z+ are the encoder input embeddings of negative and positive entities e−, e+ correspondingly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Given N negative entities for a mention m, the generated adversarial entity embeddings Padv = {z− adv_1, .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' , z− adv_N, z+ adv} are used as additional training examples, giving rise to an auxiliary loss term that encourages the model to be invariant to local adversarial perturbations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Thus, the final ob- jective we are trying to minimise becomes: LPb(m, P) + λLPb(m, Padv) (8) where λ is a hyperparameter controlling the relative contributions of the two losses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 5 Experiments 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='1 Datasets MedMentions This is is a biomedical entity- linking dataset consisting of over 4,000 PubMed abstracts (Mohan and Li, 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' As recommended by the authors, we use the ST21PV subset, which has around 200,000 mentions in total.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' A large num- ber of mentions in both the validation and test splits are zero-shot, meaning their ground truth label is not present in the training data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We do not carry out any additional preprocessing on the dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Finally, MedMentions Zero-Shot EL Train Val Test Train Val Test Mentions 120K 40K 40K 49K 10K 10K Entities 19K 8K 8K 333K 90K 70K % Entities seen 100 57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='5 57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='5 100 0 0 Table 1: Statistics of datasets used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' "% Entities seen" signifies the percentage of ground truth entities seen during training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' for the knowledge base (KB), we follow the frame- work in Varma et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2021) and use the UMLS 2017AA version filtered by the types present in the ST21PV subset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The final KB includes approxi- mately 2.' 
To evaluate our models in the NIL detection setting, we have created a new dataset based on MedMentions. In this dataset, we have assigned mentions corresponding to 11 entity types a NIL label and removed them from the knowledge base. Details on the dataset statistics and removed entity types can be found in the Appendix.

Zero-Shot Entity Linking dataset ZESHEL, a general-domain dataset, was constructed by Logeswaran et al. (2019) from Wikias (https://wikia.com). It consists of 16 independent Wikias. The task is to link mentions in each document to a Wikia-specific entity dictionary with provided entity descriptions. The dataset is zero-shot, meaning there is no overlap in entities between the training, validation and test sets.

5.2 Input Representation and Model Architecture

Similarly to Wu et al. (2020), Zhang and Stratos (2021) and Varma et al. (2021), our candidate retriever is a bi-encoder consisting of two independent BERT transformers. We use the bi-encoder to encode a textual mention and an entity description independently and then obtain a similarity score between them.
Namely, given a mention and its surrounding context τ_m and an entity τ_e, we obtain dense vector representations y_m = red(T1(τ_m)) and y_e = red(T2(τ_e)), where T1 and T2 are the two independent transformers of the bi-encoder and red(·) is a function that reduces the output of a transformer into a single vector. We use a mean pooling operation for the function red(·). As in Wu et al. (2020), Zhang and Stratos (2021) and Varma et al. (2021), we use the dot product to score the mention y_m against an entity vector y_e when using the CE loss. For our Proxy-based loss we use cosine similarity.

In this work, we focus on entity linking by efficient candidate retrieval, but we also include the ranker results using the highest scoring candidate entities in the Appendix, where we also include more details on entity, mention and context modelling.
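As a rough illustration of the bi-encoder described above, the sketch below mean-pools the token outputs of two independent Hugging Face BERT encoders and scores a mention against an entity description with either a dot product (CE setup) or cosine similarity (Proxy-based setup); the model checkpoint and example strings are placeholders, not the authors' exact configuration.

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
mention_encoder = AutoModel.from_pretrained("bert-base-uncased")  # T1
entity_encoder = AutoModel.from_pretrained("bert-base-uncased")   # T2

def encode(encoder, text):
    """red(T(tau)): mean-pool the token representations into a single vector."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128)
    hidden = encoder(**inputs).last_hidden_state           # (1, seq_len, hidden)
    mask = inputs["attention_mask"].unsqueeze(-1).float()  # (1, seq_len, 1)
    return (hidden * mask).sum(dim=1) / mask.sum(dim=1)    # (1, hidden)

y_m = encode(mention_encoder, "[mention with surrounding context]")
y_e = encode(entity_encoder, "[entity name and description]")

dot_score = (y_m * y_e).sum(dim=-1)                                  # used with the CE loss
cos_score = torch.nn.functional.cosine_similarity(y_m, y_e, dim=-1)  # used with the Proxy-based loss
```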
5.3 Training & Evaluation Details

In all our experiments we used the transformer architecture (Vaswani et al., 2017) for the encoders. Namely, we used BERT (Devlin et al., 2019), initialised with appropriate pre-trained weights: SapBERT (Liu et al., 2021) for MedMentions and the uncased BERT-base (Devlin et al., 2019) for ZESHEL. For FGSM regularization, we apply adversarial perturbations to the composite token embeddings (i.e. the sum of word, position and segment embeddings) used as input to BERT. We apply our regularization to both the Proxy-based and CE losses. For information on hyperparameter tuning please refer to the Appendix. We tune all of our experiments on the validation set and report results on the test set. Due to hardware limitations, training was conducted on a single V100 GPU machine with 16 GB of GPU memory. The limited GPU capacity, in particular memory, posed a challenge by constraining us to a relatively low number of negatives when training a retriever.

5.3.1 Candidate Retriever

The retriever model is optimised with the Proxy-based loss (3) and the benchmark CE loss (1) for fair comparison. We evaluate the retriever on the micro-averaged recall@1 and recall@64 metrics, where in our setup recall@1 is equivalent to accuracy. Here we focus on the recall@1 metric, which is highly relevant for efficient candidate retrieval models that do not necessitate running an expensive cross-encoder for candidate ranking. We use two negative sampling techniques: (1) Random, where the negatives are sampled uniformly at random from all entities in the knowledge base, and (2) Mixed-p, where p percent of the negatives are hard and the rest are random. This is motivated by the results shown in Zhang and Stratos (2021). We set p to 50%.
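A small sketch of the Mixed-p sampling scheme just described (p = 50% by default here); it assumes a precomputed list of hard-negative candidate IDs per mention and is purely illustrative.

```python
import random

def sample_negatives(all_entity_ids, hard_candidate_ids, gold_id, num_neg=64, p=0.5):
    """Mixed-p sampling: p percent hard negatives, the remainder uniformly random."""
    num_hard = int(num_neg * p)
    negatives = [e for e in hard_candidate_ids if e != gold_id][:num_hard]
    while len(negatives) < num_neg:
        candidate = random.choice(all_entity_ids)
        if candidate != gold_id and candidate not in negatives:
            negatives.append(candidate)
    return negatives
```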
Hard negative mining Retrieving hard negatives requires running the model in inference mode over the entire KB. Then, for each mention, the most similar (i.e. hard) negatives are sampled according to a scoring function. Here, we use FAISS (Johnson et al., 2019) to obtain hard negatives given a mention and an index of entity embeddings from the KB. Running a forward pass over the entire KB at regular intervals can be costly and time-consuming, as the KB often amounts to millions of entities. Moreover, the computational complexity of retrieving hard negatives can grow substantially depending on the scoring function. For example, the traditionally used scoring function also leveraged in this work, where the mention and entity are each represented with a single embedding, requires O(me) approximate nearest-neighbour comparisons, where m and e are the numbers of mentions and entities respectively. However, employing an alternative scoring function such as the sum-of-max used in Zhang and Stratos (2021), which compares a set of mention embeddings with a set of entity embeddings, results in O(mexy), where x and y are the numbers of mention and entity vector embeddings. In Zhang and Stratos (2021), x and y are set to 128, the maximum number of tokens in the mention and entity input sequences. This highlights the computational cost of hard negative mining and underlines the need both for methods that work effectively with random samples and for more efficient hard negative mining strategies. In this work we propose a method for the former.
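The sketch below shows how such hard negatives could be mined with FAISS from a single-vector entity index, as in the O(me) setting discussed above; the embedding arrays are random placeholders standing in for encoder outputs.

```python
import faiss
import numpy as np

dim = 768
entity_embeddings = np.random.rand(100_000, dim).astype("float32")   # placeholder KB embeddings
mention_embeddings = np.random.rand(512, dim).astype("float32")      # placeholder mention embeddings

index = faiss.IndexFlatIP(dim)       # exact inner-product (dot-product) index
index.add(entity_embeddings)

# The top-k most similar entities per mention are hard-negative candidates;
# the gold entity still has to be filtered out afterwards.
scores, hard_candidate_ids = index.search(mention_embeddings, 64)
```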
Biomedical Out of Knowledge Base Detection For the biomedical NIL detection scenario, training proceeds exactly as in the in-KB setting. We train models with the Proxy-based loss with different margins, and also a model with the CE loss. In each case, we use a validation set that includes NIL mentions to select an appropriate score threshold for the retrieval model. Mentions whose corresponding top-ranked entity does not achieve this score are assigned the NIL label. We choose the threshold that maximises the F1 score for NIL entities on the validation set. We then apply this threshold to detect NIL mentions in the test set.
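A sketch of this threshold selection step: sweep candidate thresholds over the validation scores and keep the one that maximises the F1 score of the NIL class. The array names are assumptions for illustration.

```python
import numpy as np
from sklearn.metrics import f1_score

def select_nil_threshold(top_scores, is_nil, num_steps=200):
    """top_scores: score of the top-ranked entity for each validation mention.
    is_nil:        boolean array, True where the mention has no entry in the KB."""
    best_threshold, best_f1 = None, -1.0
    for t in np.linspace(top_scores.min(), top_scores.max(), num_steps):
        predicted_nil = top_scores < t    # below the threshold -> predict NIL
        score = f1_score(is_nil, predicted_nil)
        if score > best_f1:
            best_threshold, best_f1 = t, score
    return best_threshold
```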
6 Results

We present the results for candidate retrieval and benchmark our models against suitable methods. We name our method Proxy-based Entity Linking (PEL-Pb). We also report the results of a version of our model which uses the CE loss (PEL-CE) on all experiments for comparison.

                          # Neg.         recall@1   recall@64
  Angell et al. (2021)      -              50.8        85.3
  Agarwal et al. (2021)     -              72.3        95.6
  Varma et al. (2021)      100             71.7         -
  PEL-CE                   32 (mixed)      72.1        95.5
                           64 (mixed)      72.1        95.6
                           64 (random)     55.7        94.0
  PEL-Pb                   32 (mixed)      71.6        93.3
                           64 (mixed)      72.6        95.0
                           64 (random)     63.3        95.9
  PEL-CE + FGSM            32 (mixed)      72.3        95.5
  PEL-Pb + FGSM            32 (mixed)      72.4        93.7

Table 2: Candidate retrieval results on the MedMentions dataset. CE and Pb refer to the cross-entropy and proxy-based losses respectively. All experiments were run with mixed random and hard negatives "(mixed)", or only "(random)" negatives. The bold figures represent the best score for each recall metric. Note that the FGSM PEL variants were only run with 32 negatives due to GPU memory constraints.
                                   Random                    Mixed
                            recall@1   recall@64      recall@1   recall@64
  Wu et al. (2019)†            -          81.80          46.5       84.8
  Agarwal et al. (2021)      38.6         84.0           50.4       85.1
  Ma et al. (2021)             -            -            45.4       90.8
  Zhang and Stratos (2021)     -          87.62            -        89.6
  PEL-CE                     44.1         84.8           52.5       87.2
  PEL-Pb                     48.9         85.2           53.1       86.0
  PEL-CE + FGSM              44.1         85.2           53.2       87.2
  PEL-Pb + FGSM              49.7         85.6           54.2       86.6

Table 3: Candidate retrieval results on the ZESHEL dataset. CE and Pb refer to the cross-entropy and proxy-based losses respectively. The negative-to-positive sample ratio for all PEL runs is 32. The bold figures represent the best score for each sampling strategy (random vs. mixed random and hard). The highlighted figure represents the best overall score across strategies. †We use the results reported in Zhang and Stratos (2021) for random negatives and Ma et al. (2021) for mixed negatives.
6.1 MedMentions

Table 2 shows that all approaches using bi-encoder transformer models strongly outperform the N-Gram TF-IDF retriever proposed in Angell et al. (2021) on both recall@1 and recall@64. We also observe the strong positive effect of including hard negatives during model training. The effect is particularly strong for the CE loss, where recall@1 increases by 17% compared with training on random negatives.
We believe that this difference is partly due to the large size of the MedMentions KB, amounting to 2.36M entities, which contributes to the importance of hard negative mining. For the Proxy-based loss, including hard negatives increases recall@1 by 9%, achieving state-of-the-art performance of 72.6%. Adding FGSM regularisation boosted performance, as can be seen from the experiments with 32 negatives (the largest number of negatives we could fit into GPU memory when applying FGSM). However, it did not exceed the performance of the unregularized model with 64 negative samples.
                        NIL                        All classes incl. NIL
                auPR   Precision   Recall         Recall@1   Recall@64
  Pb (m=0)      83.7      81.2      71.0            72.6       90.4
  Pb (m=0.01)   84.4      81.6      71.5            72.5       90.2
  Pb (m=0.05)   85.8      83.3      73.5            72.4       89.9
  Pb (m=0.1)    87.6      85.2      79.2            69.4       85.7
  CE            32.3      31.8      74.0            64.4       76.1

Table 4: NIL detection results on the MedMentions dataset. auPR, precision and recall are reported exclusively for the NIL class, whereas micro-averaged recall@1 and recall@64 are reported for all classes including NIL. Pb: Proxy-based with margin m, CE: Cross-Entropy.

Figure 2: Precision-recall curves (precision vs. recall) for NIL detection on the MedMentions dataset. Pb: Proxy-based with margin m, CE: Cross-Entropy.
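For reference, a curve like the one in Figure 2 can be traced by treating mentions whose top-entity score falls below a sweeping threshold as NIL predictions; the scikit-learn sketch below is an illustration with placeholder arrays, not the authors' evaluation code.

```python
import numpy as np
from sklearn.metrics import precision_recall_curve, average_precision_score

top_scores = np.random.rand(1000)          # placeholder: top-ranked entity score per mention
is_nil = np.random.rand(1000) < 0.2        # placeholder: ground-truth NIL labels

# A lower retrieval score means "more likely NIL", so the score is negated.
precision, recall, thresholds = precision_recall_curve(is_nil, -top_scores)
au_pr = average_precision_score(is_nil, -top_scores)   # approximates the area under the PR curve
```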
Biomedical Out of Knowledge Base detection We also evaluated our proposed loss function on NIL detection. All models trained with the Proxy-based loss significantly outperform the CE-based model in terms of both precision and recall (Figure 2). The CE loss does not encourage low absolute scores for negative examples, but rather scores that are lower than those of the positive examples. As we can see from the results, CE training fails to assign low scores to NIL mentions, as these are out-of-distribution negatives and thus have not been compared to positive examples during model training. Our Proxy-based loss does not suffer from this issue, even with a margin of 0. We believe that this is accomplished by the decoupling of the positive and negative loss terms, such that low absolute score values are encouraged for negative examples.
Furthermore, the higher the Proxy-based margin, the better the model's performance at detecting NIL mentions. At the same time, Proxy-based models with lower margins perform better on the overall recall metrics (Table 4), which are computed with respect to all classes including the NIL class. Given that the performance differences among models with different margins are minimal, a practitioner could choose the margin considering the trade-off between NIL detection and overall model performance. To our knowledge, we are the first to propose a method for NIL detection using the bi-encoder architecture.

6.2 Zero-Shot Entity Linking dataset

Based on the candidate retrieval results in Table 3, we can draw six key conclusions. (1) Proxy-based models (Pb) outperform their Cross-Entropy (CE) counterparts across all considered settings for recall@1. In particular, our Proxy-based model using hard negatives and FGSM regularization achieves state-of-the-art recall@1 on this dataset. This highlights the gain obtained by breaking the dependency between positive and negative pairs. (2) Including hard negatives always boosts model performance. This is particularly evident on the recall@1 metric. The model trained with the CE loss strongly depends on hard negatives, with recall@1 increasing by 8% compared to training with random negatives. For the Proxy-based loss the increase is 4%, as the model already performs competitively when trained with random negatives.
This showcases the importance of hard negative sampling for the CE loss. Hard negatives provide the model with much more meaningful feedback and avoid the threat of vanishing gradients (Eq. 2). (3) The difference between Pb and CE models becomes much smaller for recall@64. Trivially, as k increases, recall@k for all models converges towards 1. Additionally, once k exceeds the number of hard negatives, the model's ability to distinguish the hard negatives from the positive is no longer reflected in the metric. (4) CE models marginally outperform Pb models with hard negatives at recall@64. Hard negatives consistently have a larger impact on CE than on Pb at recall@64 as well (see point 2), while the benefits of Pb are nullified as discussed in point (3). (5) Alternative methods leveraging the CE loss and different model architectures, such as MuVER (Ma et al., 2021) and SOM (Zhang and Stratos, 2021), outperform the bi-encoder based approach at recall@64. However, both MuVER and SOM are more complex models tuned for achieving high recall@64, whereas the main focus of our approach is high recall@1 in the pursuit of avoiding the additional ranking stage. Pb outperforms the only single-stage entity linking model, Agarwal et al. (2021), across the board. (6) FGSM regularization boosts the results of both Proxy-based and CE models, demonstrating its promise as a general method for regularizing the retrieval model.
7 Discussion and Future Work

We have proposed and evaluated a novel proxy-based loss for biomedical candidate retrieval. Additionally, we have adopted an adversarial regularization technique designed to simulate hard negatives, and shown that both our loss and our regularization boost performance on the recall@1 metric. We have also constructed a biomedical dataset for NIL detection and demonstrated that our candidate retrieval model can robustly identify biomedical NIL entities while maintaining high overall performance. These are important advances towards closing the gap between the two-stage approach, which includes an expensive cross-encoder, and a candidate retriever-only setup.

Notably, our work highlights the importance of hard negative sampling when optimising the candidate generator with the CE loss. Random negative sampling together with the CE loss can make the problem trivial, for example when the randomly sampled negative entity has a different type. However, accessing hard negative examples during model training can be challenging, particularly when the knowledge base is large and entity representations are frequently updated.

Considering this, we recommend employing our Proxy-based loss for the candidate retrieval task in three different scenarios: (1) training with random negatives, (2) optimising for recall@1, and (3) detecting NIL entities. Moreover, we also recommend leveraging FGSM regularisation in any setup and for both the retrieval and ranking tasks.

An interesting direction would be to approximate hard negatives without frequent updates of the entity representations. This could potentially be done by keeping the entity encoder frozen, or by exploring alternative relatedness measures that do not require frequently running the model over the whole knowledge base.
Finally, there is a plethora of work on proxy-based (Movshovitz-Attias et al., 2017; Kim et al., 2020) and pair-based losses (Bromley et al., 1993; Chopra et al., 2005; Schroff et al., 2015; Dong and Shen, 2018), usually discussed in the computer vision and metric learning literature. Improving candidate retrieval is a crucial step towards high-performing and efficient entity linking systems that can be easily applied in real-world settings.

Limitations

There are several limitations to our work. Firstly, we only demonstrate the advantages of our proposed method when computing hard negatives is computationally expensive, which is the case with large knowledge bases and expensive scoring methods. If computing hard negatives is not a bottleneck, one may use hard negative sampling with the baseline CE loss. However, biomedical knowledge bases typically contain a huge number of entities. Secondly, in our experiments we were limited to single-GPU machines with at most 16 GB of GPU memory. This prevented us from including more than 64 negative samples in the standard setup and 32 negative samples when using FGSM regularization, which could potentially have benefited model performance. Thirdly, we acknowledge that some comparison to related work is missing, in particular Zhang and Stratos (2021).
We were not able to reproduce the results cited in that paper using the publicly available code. Finally, our work is limited to proxy-based metric learning losses. More space could be devoted to how metric learning could be utilised more broadly for biomedical entity linking. We leave this for future work.

Ethics Statement

The BERT-based models fine-tuned in this work and the datasets are publicly available. We will also make our code, as well as the biomedical out-of-knowledge-base detection dataset, publicly available. The task of entity linking is often crucial for downstream applications such as relation extraction, hence potential biases at the entity linking stage can have significant harmful downstream consequences. One source of such biases is the pre-trained language models fine-tuned in this work. There is a considerable body of work devoted to the topic of biases in language models. One way entity linking systems can be particularly harmful is when they commit or propagate errors from the language models, knowledge bases, or mention detection unevenly across certain populations, such as races or genders. Because of the high ambiguity among biomedical mentions and entities in the knowledge base, it is important that users investigate the output predictions of the entity linking system and treat them as suggestions rather than gold standard. Finally, we highlight that linking an entity to its entry in the knowledge base, and out-of-knowledge-base detection, can be analogous to surveillance and tracking in the computer vision domain, which comes with substantial ethical considerations.

Acknowledgements

We thank Dane Corneil, Georgiana Neculae and Juha Iso-Sipilä for helpful feedback, and the anonymous reviewers for constructive comments on the manuscript.
References

Dhruv Agarwal, Rico Angell, Nicholas Monath, and Andrew McCallum. 2021. Entity linking and discovery via arborescence-based supervised clustering. arXiv preprint arXiv:2109.01242.

Rico Angell, Nicholas Monath, Sunil Mohan, Nishant Yadav, and Andrew McCallum. 2021. Clustering-based inference for biomedical entity linking. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2598–2608, Online. Association for Computational Linguistics.

Rajarshi Bhowmik, Karl Stratos, and Gerard de Melo. 2021. Fast and effective biomedical entity linking using a dual encoder. ArXiv, abs/2103.05028.

Olivier Bodenreider. 2004. The Unified Medical Language System (UMLS): integrating biomedical terminology. Nucleic Acids Research, 32(Database issue):D267–D270.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Jane Bromley, Isabelle Guyon, Yann LeCun, Eduard Säckinger, and Roopak Shah.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 1993.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Signature ver- ification using a "siamese" time delay neural net- work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Proceedings of the 6th International Con- ference on Neural Information Processing Systems, NIPS’93, page 737–744, San Francisco, CA, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Morgan Kaufmann Publishers Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Razvan Bunescu and Marius Pasca.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Using en- cyclopedic knowledge for named entity disambigua- tion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Association for Computational Linguistics, Eu- ropean Chapter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Chopra, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Hadsell, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' LeCun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Learning a similarity metric discriminatively, with application to face verification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recog- nition (CVPR’05), volume 1, pages 539–546 vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2019.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' BERT: Pre-training of deep bidirectional transformers for language under- standing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171–4186, Minneapolis, Minnesota.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Associ- ation for Computational Linguistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Xingping Dong and Jianbing Shen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Triplet loss in siamese network for object tracking.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Proceed- ings of the European Conference on Computer Vi- sion (ECCV).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Daniel Gillick, Sayali Kulkarni, Larry Lansing, Alessandro Presta, Jason Baldridge, Eugene Ie, and Diego Garcia-Olano.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Learning dense repre- sentations for entity retrieval.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' CoNLL 2019 - 23rd Conference on Computational Natural Language Learning, Proceedings of the Conference, pages 528–537.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Ian Goodfellow, Jonathon Shlens, and Christian Szegedy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Explaining and harnessing adversar- ial examples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In International Conference on Learn- ing Representations (ICLR).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Swapna Gottipati and Jing Jiang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Linking enti- ties to a knowledge base with query expansion.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' As- sociation for Computational Linguistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Raia Hadsell, Sumit Chopra, and Yann LeCun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Dimensionality reduction by learning an invariant mapping.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In 2006 IEEE Computer Society Confer- ence on Computer Vision and Pattern Recognition (CVPR’06), volume 2, pages 1735–1742.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' IEEE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Samuel Humeau, Kurt Shuster, Marie-Anne Lachaux, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Weston.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Poly-encoders: Transformer architectures and pre-training strategies for fast and accurate multi-sentence scoring.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' arXiv: Computa- tion and Language.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Jeff Johnson, Matthijs Douze, and Hervé Jégou.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Billion-scale similarity search with GPUs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' IEEE Transactions on Big Data, 7(3):535–547.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Omar Khattab and Matei Zaharia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' ColBERT: Ef- ficient and Effective Passage Search via Contextual- ized Late Interaction over BERT, page 39–48.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' As- sociation for Computing Machinery, New York, NY, USA.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Sungyeon Kim, Dongwon Kim, Minsu Cho, and Suha Kwak.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Proxy anchor loss for deep metric learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Proceedings of the IEEE/CVF Confer- ence on Computer Vision and Pattern Recognition (CVPR).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Nikolaos Kolitsas, Octavian-Eugen Ganea, and Thomas Hofmann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' End-to-end neural entity linking.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' arXiv preprint arXiv:1808.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='07699.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Nevena Lazic, Amarnag Subramanya, Michael Ring- gaard, and Fernando Pereira.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Plato: A se- lective context model for entity resolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Transac- tions of the Association for Computational Linguis- tics, 3:503–515.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Fangyu Liu, Ehsan Shareghi, Zaiqiao Meng, Marco Basaldella, and Nigel Collier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Self-alignment pretraining for biomedical entity representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 4228–4238, Online.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Association for Compu- tational Linguistics.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Lajanugen Logeswaran, Ming-Wei Chang, Kenton Lee, Kristina Toutanova, Jacob Devlin, and Honglak Lee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Zero-shot entity linking by reading entity de- scriptions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 3449–3460, Florence, Italy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Association for Computational Linguistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Yi Luan, Jacob Eisenstein, Kristina Toutanova, and Michael Collins.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Sparse, dense, and atten- tional representations for text retrieval.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Transac- tions of the Association for Computational Linguis- tics, 9:329–345.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Xinyin Ma, Yong Jiang, Nguyen Bach, Tao Wang, Zhongqiang Huang, Fei Huang, and Weiming Lu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' MuVER: Improving first-stage entity retrieval with multi-view entity representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Proceed- ings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 2617–2624, Online and Punta Cana, Dominican Republic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Asso- ciation for Computational Linguistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Takeru Miyato, Andrew M Dai, and Ian Good- fellow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Adversarial training methods for semi-supervised text classification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' arXiv preprint arXiv:1605.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='07725.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Sunil Mohan and Donghui Li.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Medmentions: A large biomedical corpus annotated with UMLS con- cepts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' CoRR, abs/1902.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='09476.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Jose G Moreno, Romaric Besançon, Romain Beau- mont, Eva D’hondt, Anne-Laure Ligozat, Sophie Rosset, Xavier Tannier, and Brigitte Grau.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Combining word and entity embeddings for entity linking.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In European Semantic Web Conference, pages 337–352.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Springer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Yair Movshovitz-Attias, Alexander Toshev, Thomas K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Leung, Sergey Ioffe, and Saurabh Singh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' No fuss distance metric learning using proxies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Pro- ceedings of the IEEE International Conference on Computer Vision (ICCV).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Lin Pan, Chung-Wei Hang, Avirup Sil, Saloni Pot- dar, and Mo Yu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Improved text classification via contrastive adversarial training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' arXiv preprint arXiv:2107.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='10137.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Florian Schroff, Dmitry Kalenichenko, and James Philbin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Facenet: A unified embedding for face recognition and clustering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In 2015 IEEE Con- ference on Computer Vision and Pattern Recognition (CVPR), pages 815–823.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Ozge Sevgili, Artem Shelmanov, Mikhail Arkhipov, Alexander Panchenko, and Chris Biemann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Neural entity linking: A survey of models based on deep learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' arXiv preprint arXiv:2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='00575.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Wei Shen, Jianyong Wang, and Jiawei Han.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' En- tity linking with a knowledge base: Issues, tech- niques, and solutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' IEEE Transactions on Knowl- edge and Data Engineering, 27(2):443–460.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Intriguing properties of neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' arXiv preprint arXiv:1312.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='6199.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Chen-Tse Tsai and Dan Roth.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Cross-lingual wikification using multilingual embeddings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Pro- ceedings of the 2016 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 589–598.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Maya Varma, Laurel Orr, Sen Wu, Megan Leszczynski, Xiao Ling, and Christopher Ré.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Cross-domain data integration for named entity disambiguation in biomedical text.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 4566–4575, Punta Cana, Dominican Republic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' As- sociation for Computational Linguistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Gomez, undefine- dukasz Kaiser, and Illia Polosukhin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Attention is all you need.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Proceedings of the 31st Interna- tional Conference on Neural Information Processing Systems, NIPS’17, page 6000–6010, Red Hook, NY, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Curran Associates Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Ledell Wu, Fabio Petroni, Martin Josifoski, Sebastian Riedel, and Luke Zettlemoyer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Zero-shot En- tity Linking with Dense Entity Retrieval.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Ledell Wu, Fabio Petroni, Martin Josifoski, Sebastian Riedel, and Luke Zettlemoyer.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Scalable zero- shot entity linking with dense entity retrieval.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6397–6407, Online.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Association for Computa- tional Linguistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Wenzheng Zhang and Karl Stratos.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Understand- ing hard negatives in noise contrastive estimation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 1090–1101, Online.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Association for Compu- tational Linguistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Appendices A Context and Mention Modelling We represent a mention and its surrounding context, τm, as a sequence of word piece tokens [CLS] ctxtl [Ms] mention [Me] ctxtr [SEP] where mention, ctxtl and ctxtr are the word-piece tokens of the mention, left and right context, and [Ms] and [Me] are special tokens marking the start and end of a mention respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Due to the differences in available data, we rep- resent entities differently for ZESHEL and Med- Mentions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' On ZESHEL, we represent entities with a sequence of word piece tokens [CLS] title [ENT] description [SEP] where [ENT] is a special separator token.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In con- trast, when training on the MedMentions dataset we represent an entity by the sequence [CLS] title [SEP] types [SEP] description [SEP] Descriptions of entities were sourced from UMLS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' B Candidate ranker setup and results To evaluate the impact of our candidate retriever model on the downstream task of candidate ranking, we also conducted ranking experiments on both datasets.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' # Candidates Ranker Accuracy ZESHEL Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2020) 64 Base 61.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='3 Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2020) 64 Large 63.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='0 Zhang and Stratos (2021) 64 Base 66.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='7 Zhang and Stratos (2021) 64 Large 67.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='1 PEL-Pb 16 Base 62.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='8 PEL-Pb + FGSM 16 Base 64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='6 MedMentions Bhowmik et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2021)† 68.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='4 Angell et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2021) 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='8 Varma et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2021) 10 Base 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='6 PEL-Pb 16 Base 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='0 PEL-Pb + FGSM 16 Base 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='6 Angell et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2021) 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='1 + post-processing Varma et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2021) 10 Base 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='8 + post-processing Table 5: Ranker results on the ZESHEL and MedMen- tions datasets.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' †uses the full MedMentions dataset, rather than the ST21PV subset used by other models re- ported in the table and recommended by MedMentions authors’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Figure 3: Comparison of smoothed gradient norms over training steps using two losses, CE and Proxy-based.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The left plot visualizes the smoothed gradient norm when using random, and the right one leveraging mixed-50% negatives.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' All the experiments were conducted on ZESHEL using 32 negatives.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Training & Evaluation setup Similarly as in re- lated work (Logeswaran et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Zhang and Stratos, 2021), the highest scor- ing candidate entities from the candidate retriever are passed to a ranker, which is a cross-encoder consisting of one BERT transformer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The cross- encoder Logeswaran et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2019) is used to select the best entity out of the candidate pool.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' It takes as input τm,e, which is the concatenation of men- tion/context and entity representations τm and τe.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We then obtain a dense vector representation for a mention-entity pair ym,e = Tcross(τm,e), where Tcross(τm,e) is the BERT transformer of the cross- encoder and red(·) is a mean pooling function that takes the mean over input tokens embeddings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' En- tity candidates are scored by applying a linear layer scross(m, e) = ym,eW.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We pick the best performing retrieval model on recall@16 and use it to retrieve top 16 candidate entities for each mention.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' As the number of can- didate entities is relatively low, we do not perform negative sampling and optimise the cross-encoder with the CE loss (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We report the micro- averaged unnormalized accuracy on the MedMen- tions dataset and macro-averaged unnormalized accuracy on the ZESHEL dataset in line with the prior work (Zhang and Stratos, 2021;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=', 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The results are shown in the Table 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Results In Table 5 we can observe the down- stream effect of having a candidate generator model with high recall@1 performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' On ZESHEL, We can see that a cross-encoder trained with the top 16 candidates from our best performing can- didate generator achieved higher accuracy than Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2020) who used the top 64 candidates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Moreover, similarly as with the candidate retrieval, FGSM boosts performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' For completeness, we have also included the state-of-the-art results from Zhang and Stratos (2021) who used 64 candidates and a larger BERT model in the cross-encoder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' In our experiments we were limited to a single GPU with 16 GB memory which restricted us to a low number of maximum candidates, namely 16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We strongly believe that including more candi- dates than 16 would boost the performance of our method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' On MedMentions a cross-encoder trained with the top 16 candidates from our best performing candidate generator model achieved a competitive accuracy of 74%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The accuracy further increased to 74.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='6% when adding FGSM regularisation, com- ing close to the state-of-the-art performance of Varma et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' (2021), which includes additional post- processing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' C Training details The hyperparameters used for conducting the ex- periments are visible in Table 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We use a single NVIDIA V100 GPU with 16 GB of GPU memory for all model trainings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' D Biomedical Out of Knowledge Base dataset details We constructed the OKB dataset by replacing the label of a set of mentions from the MedMentions corpus (Mohan and Li, 2019) with the NIL class.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Namely we pick the mentions belonging to 11 types: Mental Process, Health Care Related Or- ganization, Element Ion or Isotope, Medical De- vice, Health Care Activity, Diagnostic Procedure, Professional or Occupational Group, Mental Pro- cess, Laboratory Procedure, Regulation or Law, 70 Cross-Entropy 70 Cross-Entropy Proxy-based Proxy-based 09 60 50 Gradient Norm Gradient Norm 50 40 40 30 30 20 20 10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' 0 10 0 1000 2000 3000 4000 5000 6000 0 1000 2000 3000 4000 5000 6000 Training step Training stepParam Bi-encoder Cross-Encoder Input sequence length 128 256 learning rate 1e-5 2e-5 warmup proportion 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='25 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='2 eps 1e-6 1e-6 gradient clipping value 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='0 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='0 effective batch size 32 4 epochs 7 5 learning rate scheduler linear linear optimiser AdamW AdamW α 32 δ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='0 FGSM λ 1 1 FGSM ϵ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='01 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='01 Table 6: Learning parameters for the bi-encoder and cross-encoder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Organization, Professional Society.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' The final OKB subset includes approximately 24K mentions and 3K unique entities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' To ensure that the OKB dataset does not suf- fer from easy inferences and allows us to evaluate model performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We ensured that the zero-shot distribution of the OKB mentions and types across the train/validation/test split was in line with the zero-shot distribution of mentions and types in the whole dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Additionally, we verified that there is no significant overlap between mention surface forms across the splits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Moreover, we looked at the length of entity descriptions which are used to create entity representations checking that the OKB mentions entity representations statistics are similar to the statistics computed using the whole dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' E Gradient norm analysis Train Dev Test Mentions 14K 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='8K 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='7K Entities 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='2K 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='1K 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='1K % Entities seen 100 57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='7 57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content='5 Table 7: Statistics of the OKB MedMentions subset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' Figure 3 shows the behaviour of the gradient l2 norm for both losses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' We can see that for both ran- dom and mixed negatives, the norm of the Proxy- based loss has considerably lower variance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/Q9FQT4oBgHgl3EQfaDZD/content/2301.13318v1.pdf'} +page_content=' This is visible particularly when using the mixed nega- tives.' 
diff --git a/QdFRT4oBgHgl3EQfKDdm/content/tmp_files/2301.13497v1.pdf.txt b/QdFRT4oBgHgl3EQfKDdm/content/tmp_files/2301.13497v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..adbadc4bb22716818a11791a8331e229a4a82afe
--- /dev/null
+++ b/QdFRT4oBgHgl3EQfKDdm/content/tmp_files/2301.13497v1.pdf.txt
@@ -0,0 +1,360 @@
arXiv:2301.13497v1 [cs.IT] 31 Jan 2023

The weight spectrum of several families of Reed-Muller codes

Claude Carlet, Universities of Paris 8, France, and Bergen, Norway. E-mail: claude.carlet@gmail.com
(The research of this author is partly supported by the Norwegian Research Council.)
Patrick Solé, I2M (CNRS, Aix-Marseille University, Centrale Marseille), Marseilles, France. E-mail: sole@enst.fr

Abstract. We determine the weight spectrum of RM(m-3, m) for m ≥ 6, of RM(m-4, m) for m ≥ 8, and of RM(m-5, m) for m ≥ 9. The technique used is induction on m based on Corollary 2 of (Shi et al. 2019).

Keywords: Reed-Muller codes, weight spectrum
MSC (2020): 94B27, 94D10

1 Introduction

Determining the Hamming weights in Reed-Muller codes has been considered an important research topic for more than half a century [6, Chapt. 15]. The weights of the Reed-Muller codes of length 2^m and orders 0, 1, 2, m-2, m-1, m are known. These weights equal 0, 2^m for the order 0, with additionally 2^{m-1} for the order 1, and 2^{m-1} ± 2^i where m/2 ≤ i ≤ m for the order 2, see e.g. [6]. The weights in RM(m, m) are all integers between 0 and 2^m since RM(m, m) = F_2^{2^m}; the weights in RM(m-1, m) are all even integers between 0 and 2^m; the weights in RM(m-2, m) (the extended Hamming code) are all even integers between 0 and 2^m except 2 and 2^m - 2, since by the MacWilliams identity (see e.g. [6, Chapt. 5] or [3]) the weight distribution can be obtained as a function of the weight distribution of its dual RM(1, m), a two-weight code with A_0 = A_{2^m} = 1 and A_{2^{m-1}} = 2^{m+1} - 2. Another method, by induction on m, is given in [11]; we do not give it here, but it will be central for the other orders that we shall address.

All the low Hamming weights are known in all Reed-Muller codes: Berlekamp and Sloane [2] (see the Addendum in this paper) and Kasami and Tokura [4] have shown that, for r ≥ 2, the only Hamming weights in RM(r, m) occurring in the range [2^{m-r}, 2^{m-r+1}) are of the form 2^{m-r+1} - 2^i for some i; and the latter have completely characterized the codewords: the corresponding functions are affinely equivalent to

  x_1 ... x_{r-2} (x_{r-1} x_r ⊕ x_{r+1} x_{r+2} ⊕ ... ⊕ x_{r+2l-3} x_{r+2l-2}),  for 2 ≤ 2l ≤ m - r + 2,
  x_1 ... x_{r-l} (x_{r-l+1} ... x_r ⊕ x_{r+1} ... x_{r+l}),                      for 3 ≤ l ≤ min(r, m - r).

The functions whose Hamming weights are strictly less than 2.5 times the minimum distance 2^{m-r} have later been studied in [5].

Those possible weights of the codewords in the Reed-Muller codes of orders 3, ..., m-4 whose values lie between 2.5d and 2^m - 2.5d are unknown (but when m = 2r+1, they are known in some cases by using invariant theory, because the code is then self-dual, see [6, 10]).

In this note, we completely determine the weights in RM(m-3, m), RM(m-4, m) and RM(m-5, m).
Note that our work is on a different tack than works such as [9], in which the authors consider some weights in some Reed-Muller codes and look for all the functions having these weights, whereas we are looking for the weights (all of them) in some Reed-Muller codes and we do not try to find the functions having these weights. We first observe that the work of [11, Theorem 16 (2)] can be made much more precise: this reference provides some weights in RM(m-3, m), and we show that these weights are in fact all weights, thanks to the result of Kasami and Tokura [4]. We then extend the method to the codes RM(m-4, m) and RM(m-5, m).

The material is arranged as follows. The next section recalls some basic definitions and notions needed to understand the rest of the paper. Sections 3, 4 and 5 study the weight spectra of RM(m-3, m), RM(m-4, m) and RM(m-5, m), respectively. Section 6 describes some numerical examples. Section 7 concludes the article.

2 Preliminaries

The Hamming weight (in brief, the weight) of an element x = (x_1, ..., x_n) ∈ F_2^n is the number of indices i such that x_i ≠ 0. A binary linear code of length n is an F_2-subspace of F_2^n. Its dimension is its dimension as an F_2-vector space. Its minimum distance (in brief, distance) is the minimum Hamming weight of a nonzero codeword.

The extension C~ of a linear code C of length n is the linear code of length n+1 such that each codeword (c_0, ..., c_n) ∈ C~ satisfies both of the following conditions:
1. (c_0, ..., c_{n-1}) ∈ C,
2. ∑_{i=0}^{n} c_i = 0.

The weights of a code are the Hamming weights of all its codewords. The set of distinct weights (including the zero weight) is called the weight spectrum. (Contrary to the spectra used for instance in Boolean function theory, the weight spectrum in coding theory does not include the indication of the multiplicities of the weights.) The list of the numbers, classically denoted by A_i, of codewords of weight i, for i ranging from 0 to n, is called the weight distribution of the code. The Magma notation for this quantity is the list of pairs <i, A_i> where A_i ≠ 0.

The Reed-Muller codes are a family of binary linear codes of length n = 2^m. Given the order r ∈ {0, ..., m} of such a code (usually denoted by RM(r, m)), the dimension equals ∑_{i=0}^{r} (m choose i) and the minimum distance equals 2^{m-r}. An explicit definition in terms of Boolean functions is as follows. Let B_m denote the vector space of polynomials in m variables with coefficients in F_2, that is, of elements of F_2[x_1, x_2, ..., x_m] in which the exponent of each variable x_i in each monomial equals 0 or 1. Write F_2^n = {P_1, P_2, ..., P_n}. Let ev denote the evaluation map from B_m to F_2^n given by the rule ev(f) = (f(P_1), ..., f(P_n)). With this notation we define the Reed-Muller code of order r by RM(r, m) = {ev(f) : f ∈ B_m, deg(f) ≤ r}, where deg(f) is the global degree of the multivariate polynomial f (called the algebraic degree of the Boolean function that f represents).

We will use repeatedly the following lemma, which is the case q = 2 of [11, Cor. 2].

Lemma 1. For all pairs of integers (r, m) with 0 ≤ r ≤ m, the weight spectrum of RM(r+1, m+1) includes as a subset S + S, where S is the weight spectrum of RM(r, m).

We shall refer to the following result as McEliece's congruence.

Theorem 1. The weights in RM(r, m) are multiples of 2^⌊(m-1)/r⌋ [7]. This bound is tight in the sense that there is at least a codeword of RM(r, m) with weight (2t+1) · 2^⌊(m-1)/r⌋ for some integer t [1].
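Both statements are easy to experiment with. The short Python sketch below is ours (the computations in the paper itself are done with Magma [8]); the RM(1, 3) and RM(2, 4) spectra quoted in its comments are standard facts, used here only as a toy illustration of the sumset step of Lemma 1 and of the congruence of Theorem 1.

```python
# Minimal sketch (not from the paper): the sumset of Lemma 1 and the
# divisibility test of Theorem 1, illustrated on RM(1, 3) and RM(2, 4).

def sumset(S):
    """Return S + S = {a + b : a, b in S}."""
    return {a + b for a in S for b in S}

def mceliece_modulus(r, m):
    """Every weight of RM(r, m) is a multiple of this power of two (Theorem 1)."""
    return 2 ** ((m - 1) // r)

rm_1_3 = {0, 4, 8}                 # weight spectrum of RM(1, 3), a standard fact
rm_2_4 = {0, 4, 6, 8, 10, 12, 16}  # weight spectrum of RM(2, 4): all even weights
                                   # except 2 and 2^4 - 2, as recalled in Section 1

# Lemma 1: the spectrum of RM(2, 4) must contain sumset(spectrum of RM(1, 3)).
assert sumset(rm_1_3) <= rm_2_4

# Theorem 1: every weight of RM(2, 4) is a multiple of 2^((4 - 1) // 2) = 2.
assert all(w % mceliece_modulus(2, 4) == 0 for w in rm_2_4)
```

These two helpers are all that is needed to reproduce the sumset computations used in the later sections.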
We will call the following deep result the Kasami-Tokura bound. It is a consequence of [6, Chapt. 15, Th. 11], which also specifies A_w explicitly.

Theorem 2. Let w be a weight of RM(r, m) in the range 2^{m-r} ≤ w < 2^{m-r+1}. Let α = min(r, m-r) and β = (m-r+2)/2. The weight w is of the form w = 2^{m-r+1} - 2^{m-r+1-μ}, for μ in the range 1 ≤ μ ≤ max(α, β). Conversely, for any such μ, there is a w of that form in the range 2^{m-r} ≤ w < 2^{m-r+1}.

We will also require the notion of BCH code of length n and designed distance d, hereby denoted by BCH(n, d). See [6, Chapt. 9] for a precise definition.

3 The weights of the Reed-Muller codes of length 2^m and order m-3

For these codes, we have some information from the MacWilliams identity, but not much, since the weight distribution of RM(2, m) is rather complex, in particular A_{2^{m-1}}. It is shown in [11, Theorem 16], by using Magma [8] and by induction on m, that all the integers in {0, 2, 4, ..., 2^m} \ {2, 4, 6, 10, 2^m-2, 2^m-4, 2^m-6, 2^m-10} are weights in RM(m-3, m) for m ≥ 6. The method used consists in applying Lemma 1, which says that the set of weights in RM(m-3, m) contains A + A, where A is the set of weights in RM(m-4, m-1). The authors start from RM(3, 6), whose weights can be obtained by Magma [8]: {0, 2, 4, 6, 8, ..., 64} \ {2, 4, 6, 10, 54, 58, 60, 62}, and they proceed very simply by induction on m. We shall detail their proof a little (it is slightly informal) and show that the set above covers in fact all weights in RM(m-3, m).

Proposition 1. For every m ≥ 6, the weights in RM(m-3, m) are the elements of {0, 2, 4, ..., 2^m} \ {2, 4, 6, 10, 2^m-10, 2^m-6, 2^m-4, 2^m-2} = {0, 8, 12+2i, 2^m-12, 2^m-8, 2^m}, where i ranges over the consecutive integers from 0 to 2^{m-1} - 13.

Proof. The result is correct for m = 6. Assuming it is correct for some m ≥ 6, and denoting the set of weights by A, we have that A contains {0, 8, 12, 14, ..., 2^m-14, 2^m-12, 2^m-8, 2^m}, and therefore A + A contains {0, 8, 12, 14, ..., 2^{m+1}-14, 2^{m+1}-12, 2^{m+1}-8, 2^{m+1}}: the numbers 0, 8, 12, 14, ..., 2^m-14, 2^m-12, 2^m-8, 2^m are covered by adding 0 to an element of A, the numbers 2^m, 2^m+8, 2^m+12, 2^m+14, ..., 2^{m+1}-14, 2^{m+1}-12, 2^{m+1}-8, 2^{m+1} are covered by adding 2^m to an element of A, and the remaining numbers 2^m-10, 2^m-6, 2^m-4, 2^m-2, 2^m+2, 2^m+4, 2^m+6, 2^m+10 are easily covered as well. By an induction using Lemma 1, all the numbers in {0, 8, 12+2i, 2^m-12, 2^m-8, 2^m} are then weights in RM(m-3, m). To complete the proof, we show that these are the only possible weights in RM(m-3, m). The minimum distance of RM(m-3, m) being 8, and the code being stable under addition of the all-one vector (the constant Boolean function 1), the numbers 2, 4, 6, 2^m-2, 2^m-4, 2^m-6 cannot be weights, and according to Theorem 2 ([2, 4]), the only weights in the integral interval [8, 16) are the numbers of the form 16 - 2^i in this interval, and 10 is not among them. This completes the proof.
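The induction above can be replayed mechanically. The following sketch (ours, in Python rather than the authors' Magma) starts from the RM(3, 6) spectrum quoted at the beginning of this section and checks that each sumset step of Lemma 1 already yields the full set described in Proposition 1 for the next value of m, which is the combinatorial fact the proof relies on.

```python
# Sketch (not from the paper): replay the induction of Proposition 1.

def sumset(S):
    return {a + b for a in S for b in S}

def proposition_1_set(m):
    # {0, 8} ∪ {12, 14, ..., 2^m - 12} ∪ {2^m - 8, 2^m}
    top = 2 ** m
    return {0, 8, top - 8, top} | set(range(12, top - 11, 2))

# Weights of RM(3, 6), as quoted above: all even integers in [0, 64]
# except 2, 4, 6, 10 and their complements 54, 58, 60, 62.
spectrum = set(range(0, 65, 2)) - {2, 4, 6, 10, 54, 58, 60, 62}
assert spectrum == proposition_1_set(6)

for m in range(7, 11):
    spectrum = sumset(spectrum)   # by Lemma 1, these are weights of RM(m - 3, m)
    assert spectrum == proposition_1_set(m)
print("Proposition 1 reproduced for 6 <= m <= 10")
```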
4 The weights of the Reed-Muller codes of length $2^m$ and order $m - 4$

Adapting the proof above to the order $m - 4$ requires the weights of RM(3, 7), or better those of RM(4, 8) (since we can observe that starting from RM(3, 7) makes us lose some weights). It is impossible to run Magma [8] exhaustively on RM(3, 7) (and a fortiori on RM(4, 8)), because this code has size $2^{64}$, but the weights of these codes are known from the Online Encyclopedia of Integer Sequences [12, http://oeis.org/A146976] (see §6.1 below for a direct determination of the weights). The weights in RM(4, 8) are {0, 16, 24, 28, 30, ..., 226, 228, 232, 240, 256}, where the dots represent all even integers between 32 and 224. This can be generalized as follows.

Proposition 2. For every $m \ge 8$, the set of all weights in RM(m - 4, m) equals $\{0, 16, 24, 28 + 2i, 2^m - 28, 2^m - 24, 2^m - 16, 2^m\}$, where $i$ ranges over the set of consecutive integers from 0 to $2^{m-1} - 29$.

Proof. The proof is similar to that of Proposition 1: using Lemma 1, we can show that the set of weights contains $\{0, 16, 24, 28 + 2i, 2^m - 28, 2^m - 24, 2^m - 16, 2^m\}$, and Theorem 2 tells us that there is nothing else. Note that by Theorem 1 all weights are even.

Note that the weight distribution of RM(5, 9) is given in the Online Encyclopedia of Integer Sequences, see [12, http://oeis.org/A018897], and it confirms our result.

5 The weights of the Reed-Muller codes of length $2^m$ and order $m - 5$

The special cases RM(3, 8) and RM(4, 9) are treated in detail in the next section, in §6.2 and §6.3. Note that the weight distribution of RM(3, 8) is known (again see §6.2), unlike that of RM(4, 9).

Proposition 3. For every $m \ge 9$, the set of all weights in RM(m - 5, m) equals $\{0, 32, 48, 56, 60, 64 + 4i, 2^m - 60, 2^m - 56, 2^m - 48, 2^m - 32, 2^m\}$, where $i$ ranges over the set of consecutive integers from 0 to $2^{m-2} - 32$.

The proof is quite similar to those of the previous propositions. It uses induction on $m$, as well as Lemma 1, Theorem 1 and Theorem 2, and is omitted.

6 Numerical examples

In the present section, we show how some known weight spectra can be obtained, as well as some new ones.

6.1 The weights of RM(3, 7)

The weights of RM(2, 6) are, by the results in [6, Chapter 15], equal to

S := {0, 16, 24, 28, 32, 36, 40, 48, 64}

(actually, they are given in [12, OEIS A001726]). A direct hand calculation or a computation in Magma [8] yields

S + S = {0, 16, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 112, 128}.

By McEliece's congruence (Theorem 1), the weights in RM(3, 7) are multiples of 4. The multiples of 4 missing from S + S are 4, 8, 12, 20 and their complements to the length 128: the first three are below the minimum distance 16, the weight 20 is excluded by [4], and the complements are excluded by symmetry, since the code contains the all-one vector. Hence S + S equals the whole weight spectrum of RM(3, 7).

6.2 The weights of RM(3, 8)

The present subsection will show why the induction of Proposition 3 must start at $m = 9$. The weights in RM(2, 7) are, by the results in [6, Chapter 15], equal to

S := {0, 32, 48, 56, 64, 72, 80, 96, 128}

(and this is confirmed by [12, OEIS A006006]). A direct hand calculation or a computation in Magma [8] yields

S + S = {0, 32, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 224, 256}.

By Theorem 2, this list is complete in the region {0, ..., 64} and in its complement to 256. However, all the elements of S + S are multiples of 8, while Theorem 1 only guarantees that the weights in RM(3, 8) are multiples of 4, and since McEliece's congruence is known to be tight [1], some weight is not a multiple of 8. Hence S + S is strictly included in the spectrum of RM(3, 8).
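For completeness, the sumset computation just described can be reproduced by the following Python fragment (ours, with the set S copied from the text); it confirms that every element of S + S is a multiple of 8, so the weights of RM(3, 8) that are congruent to 4 modulo 8 cannot be reached through Lemma 1 alone.

S = {0, 32, 48, 56, 64, 72, 80, 96, 128}           # weights of RM(2, 7)
sum_set = sorted({a + b for a in S for b in S})
print(sum_set)                                     # the 25 values listed above
assert all(w % 8 == 0 for w in sum_set)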
The strict inclusion is confirmed by the known weight distribution [12, A146953], also available at https://isec.ec.okayama-u.ac.jp/home/kusaka/wd/RM/tomita/RM256 93.wd, which gives in addition the weights

100, 108, 116, 124, 132, 140, 148, 156, 164,

that are congruent to 4 modulo 8.

These weights can be recovered by taking the intersection of RM(3, 8) with the extension of BCH(255, 19). The weight distribution of this intersection (a code of dimension 26, small enough to allow the use of the WeightDistribution command of Magma) is:

[< 0, 1 >, < 80, 8 >, < 88, 56 >, < 92, 512 >, < 96, 4939 >, < 100, 30216 >, < 104, 159164 >, < 108, 615184 >, < 112, 1851060 >, < 116, 4389152 >, < 120, 8126540 >, < 124, 11733960 >, < 128, 13287280 >, < 132, 11733960 >, < 136, 8126540 >, < 140, 4389152 >, < 144, 1851060 >, < 148, 615184 >, < 152, 159164 >, < 156, 30216 >, < 160, 4939 >, < 164, 512 >, < 168, 56 >, < 176, 8 >, < 256, 1 >].

Remark. We do not provide, properly speaking, a computer-free determination of the weight spectrum of RM(3, 8); but we show how it can be derived in a reproducible way. ⋄

6.3 The weights of RM(4, 9)

According to the previous subsection, the weight spectrum of RM(3, 8) equals

S = {0, 32, 48, 56, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 200, 208, 224, 256}.

A Magma calculation yields

S + S = {0, 32, 48, 56, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204, 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 248, 252, 256, 260, 264, 268, 272, 276, 280, 284, 288, 292, 296, 300, 304, 308, 312, 316, 320, 324, 328, 332, 336, 340, 344, 348, 352, 356, 360, 364, 368, 372, 376, 380, 384, 388, 392, 396, 400, 404, 408, 412, 416, 420, 424, 428, 432, 436, 440, 444, 448, 456, 464, 480, 512}.

By McEliece's congruence [7], the weights in RM(4, 9) are multiples of 4; those smaller than the minimum distance 32, and their complements to 512, are of course not weights. By Theorem 2, the weights 36, 40, 44, 52 and their complements to 512 are excluded, since 64 - 36 = 28, 64 - 40 = 24, 64 - 44 = 20 and 64 - 52 = 12 are not powers of 2. However, still by Theorem 2, the integer $60 = 64 - 4 = 2^6 - 2^{6-4}$ is a weight of RM(4, 9). Note that this integer does not appear in the spectrum of RM(3, 8), since there, in the notation of Theorem 2, $\mu \le \lfloor \max(3, 7/2) \rfloor = 3$, whereas $\mu = 4$ for the weight 60 of RM(4, 9). This shows that the statement of Proposition 3 can only be valid for $m \ge 9$. Since the code contains the all-one vector, the complement $512 - 60 = 452$ is a weight as well, and it does not belong to S + S either. Hence $\{60, 452\} \cup (S + S)$ is the whole spectrum of RM(4, 9).

7 Conclusion and open problems

In this note, we have derived the weight spectra of three infinite families of Reed-Muller codes. The obstruction to further generalization is merely computational: the base point of the recurrence might not be amenable to exact enumeration by computer. For instance, the weight spectrum of RM(4, 10) is not known, and the weight spectrum of RM(3, 9) given in http://oeis.org/A018895 is too sparse to derive that of RM(4, 10) by the techniques of this note. Based on these results, we can formulate the following conjecture.

Conjecture. Let $c$ be any positive integer. Then, for $m$ large enough, the weight spectrum of RM(m - c, m) is made of all the weights between the minimum distance $2^c$ and its complement to the length $2^m$ that are authorized by McEliece's congruence and Kasami-Tokura's result.

This conjecture is verified for $c = 1, 2, 3, 4, 5$.
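To make the statement concrete, the following Python sketch generates, under our reading of the conjecture, the set of weights it authorizes for RM(m - c, m): the multiples of $2^{\lfloor (m-1)/(m-c) \rfloor}$ lying between $2^{c+1}$ and $2^m - 2^{c+1}$, together with the values permitted by Theorem 2 in the two extreme ranges, plus 0 and $2^m$. Comparing this set with the closed forms of Propositions 1 and 2 is only a consistency check of those two statements, not a proof, and the helper names are ours.

from math import floor

def authorized(c, m):
    # Conjectured spectrum of RM(m - c, m): McEliece's congruence combined
    # with the Kasami-Tokura constraint near both ends of the weight range.
    r, n, d = m - c, 2 ** m, 2 ** c
    step = 2 ** ((m - 1) // r)                            # Theorem 1
    mu_max = floor(max(min(r, m - r), (m - r + 2) / 2))   # Theorem 2
    low = {2 * d - 2 ** (c + 1 - mu) for mu in range(1, mu_max + 1)}
    mid = set(range(2 * d, n - 2 * d + 1, step))
    allowed = low | mid | {n - w for w in low}
    return {0, n} | {w for w in allowed if w % step == 0}

def prop1(m):    # closed form of Proposition 1 (c = 3)
    n = 2 ** m
    return {0, 8, n - 8, n} | set(range(12, n - 11, 2))

def prop2(m):    # closed form of Proposition 2 (c = 4)
    n = 2 ** m
    return {0, 16, 24, n - 24, n - 16, n} | set(range(28, n - 27, 2))

assert all(authorized(3, m) == prop1(m) for m in range(6, 13))
assert all(authorized(4, m) == prop2(m) for m in range(8, 13))
print("conjectured description matches Propositions 1 and 2 in this range")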
In view of the ternary and quinary analogues of Theorem 15 of [11], namely Theorems 17 and 18 of [11], it is natural to ask for analogues of our results for generalized Reed-Muller codes of characteristics 3 and 5. However, an exact analogue of Theorem 2 in that context does not seem to be available in the literature.

References

[1] Y. L. Borissov, On McEliece's result about divisibility of the weights in the binary Reed-Muller codes, Seventh International Workshop on Optimal Codes and Related Topics, September 6-12, 2013, Albena, Bulgaria, pp. 47-52. http://www.moi.math.bas.bg/oc2013/a7.pdf

[2] E. R. Berlekamp and N. J. A. Sloane, Restrictions on the weight distributions of the Reed-Muller codes, Information and Control 14, pp. 442-446, 1969.

[3] C. Carlet, Boolean Functions for Cryptography and Coding Theory, Cambridge University Press, 2021.

[4] T. Kasami and N. Tokura, On the weight structure of Reed-Muller codes, IEEE Transactions on Information Theory 16, pp. 752-759, 1970.

[5] T. Kasami, N. Tokura and S. Azumi, On the weight enumeration of weights less than 2.5d of Reed-Muller codes, Information and Control 30, pp. 380-395, 1976.

[6] F. J. MacWilliams and N. J. A. Sloane, The Theory of Error-Correcting Codes, North-Holland, 1977.

[7] R. J. McEliece, Weight congruences for p-ary cyclic codes, Discrete Mathematics 3, pp. 177-192, 1972.

[8] The Magma online calculator, http://magma.maths.usyd.edu.au/calc/

[9] S. Mesnager and A. Oblaukhov, Classification of the codewords of weights 16 and 18 of the Reed-Muller code RM(n-3, n), IEEE Transactions on Information Theory 68(2), pp. 940-952, 2021.

[10] V. S. Pless and W. C. Huffman (Eds.), R. A. Brualdi (assistant editor), Handbook of Coding Theory, Elsevier, 1998.

[11] M. Shi, X. Li, A. Neri and P. Solé, How many weights can a cyclic code have? IEEE Transactions on Information Theory 66(3), pp. 1449-1459, 2019.

[12] N. J. A. Sloane, The On-Line Encyclopedia of Integer Sequences (OEIS), http://oeis.org/wiki/Welcome
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 308,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 312,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 316,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 320,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 324,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 328,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 332,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 336,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 340,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 344,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 348,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 352,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 356,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 360,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 364,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 368,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 372,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 376,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 380,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 384,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 388,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 392,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 396,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 400,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QdFRT4oBgHgl3EQfKDdm/content/2301.13497v1.pdf'} +page_content=' 404,' metadata={'source': 
408, 412, 416, 420, 424, 428, 432, 436, 440, 444, 448, 456, 464, 480, 512}.

By McEliece's congruence [7] the weights in RM(4, 9) are multiples of 4. By Theorem 2 the weights 36, 40, 44, 52 and their complements to 512 are excluded, since 64 − 36 = 28, 64 − 40 = 24, 64 − 44 = 20 and 64 − 52 = 12 are not powers of 2. However, still by Theorem 2, the integer 60 = 64 − 4 = 2^6 − 2^{6−4} is a weight of RM(4, 9). Note that this integer does not appear in the spectrum of RM(3, 8), since there, in the notation of Theorem 2, µ ≤ ⌊max(3, 7/2)⌋ = 3, whereas µ = 4 for the weight 60 of RM(4, 9). This shows that the statement of Proposition 3 can only be valid for m ≥ 9. Hence {60} ∪ (S + S) is the whole spectrum of RM(4, 9).
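The sumset computation above is easy to reproduce outside Magma. The short Python sketch below is only an illustration (it is not the original Magma code); the spectrum S is copied from the text, the sumset S + S is formed directly, and the extra weight 60 supplied by Theorem 2 is adjoined.

```python
# Reproduce the S + S computation for RM(4, 9) from the spectrum S of RM(3, 8).
S = [0, 32, 48, 56, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116,
     120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 172, 176,
     180, 184, 188, 192, 200, 208, 224, 256]

sumset = sorted({a + b for a in S for b in S})        # all sums are at most 256 + 256 = 512
assert all(w % 4 == 0 for w in sumset)                # McEliece's congruence: multiples of 4
assert 60 not in sumset                               # 60 comes from Theorem 2, not from S + S
spectrum_rm_4_9 = sorted(set(sumset) | {60})
print(len(spectrum_rm_4_9), spectrum_rm_4_9)
```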
7 Conclusion and open problems

In this note, we have derived the weight spectra of three infinite families of Reed-Muller codes. The obstruction to further generalization is merely computational: the base point of the recurrence might not be amenable to exact enumeration by computer. For instance, the weight spectrum of RM(4, 10) is not known, and the weight spectrum of RM(3, 9) given in http://oeis.org/A018895 is too sparse to derive that of RM(4, 10) by the techniques in this note.

Based on these results we can formulate the following.

Conjecture: Let c be any positive integer. Then, for m large enough, the weight spectrum of RM(m − c, m) consists of all the weights between the minimum distance 2^c and its complement to the length 2^m that are authorized by McEliece's congruence and Kasami-Tokura's result.

This conjecture is verified for c = 1, 2, 3, 4, 5.

In view of the ternary and quinary analogues of Theorem 15 of [11], namely Theorems 17 and 18 of [11], it is natural to ask for analogues of our results for generalized Reed-Muller codes of characteristics 3 and 5. However, an exact analogue of Theorem 2 in that context does not seem to be available in the literature.

References

[1] Y. L. Borissov. On McEliece's result about divisibility of the weights in the binary Reed-Muller codes. Seventh International Workshop on Optimal Codes and Related Topics, September 6-12, 2013, Albena, Bulgaria, pp. 47-52. http://www.moi.math.bas.bg/oc2013/a7.pdf
[2] E. R. Berlekamp and N. J. A. Sloane. Restrictions on the weight distributions of the Reed-Muller codes. Information and Control 14, pp. 442-446, 1969.
[3] C. Carlet. Boolean Functions for Cryptography and Coding Theory. Cambridge University Press, 2021.
[4] T. Kasami and N. Tokura. On the weight structure of the Reed-Muller codes. IEEE Transactions on Information Theory 16, pp. 752-759, 1970.
[5] T. Kasami, N. Tokura, and S. Azumi. On the weight enumeration of weights less than 2.5d of Reed-Muller codes. Information and Control 30, pp. 380-395, 1976.
[6] F. J. MacWilliams and N. J. Sloane. The Theory of Error-Correcting Codes. North Holland, 1977.
[7] R. J. McEliece. Weight congruence for p-ary cyclic codes. Discrete Mathematics 3, pp. 177-192, 1972.
[8] http://magma.maths.usyd.edu.au/calc/
[9] S. Mesnager and A. Oblaukhov. Classification of the codewords of weights 16 and 18 of the Reed-Muller code RM(n-3, n). IEEE Transactions on Information Theory 68(2), pp. 940-952, 2021.
[10] V. S. Pless, W. C. Huffman, Eds., R. A. Brualdi, assistant editor. Handbook of Coding Theory. Elsevier, 1998.
[11] M. Shi, X. Li, A. Neri and P. Solé. How many weights can a cyclic code have? IEEE Transactions on Information Theory, vol. 66, no. 3, pp. 1449-1459, 2019.
[12] N. J. Sloane. Online Encyclopedia of Integer Sequences (OEIS). http://oeis.org/wiki/Welcome

diff --git a/RdFPT4oBgHgl3EQfpzV2/content/tmp_files/2301.13139v1.pdf.txt b/RdFPT4oBgHgl3EQfpzV2/content/tmp_files/2301.13139v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c3fec10fef10e08f8a36bc47fa43b82e5b43c221
--- /dev/null
+++ b/RdFPT4oBgHgl3EQfpzV2/content/tmp_files/2301.13139v1.pdf.txt
@@ -0,0 +1,2678 @@

arXiv:2301.13139v1 [stat.ML] 30 Jan 2023

A Novel Framework for Policy Mirror Descent with General Parametrization and Linear Convergence

Carlo Alfano¹, Rui Yuan², Patrick Rebeschini¹
¹Department of Statistics, University of Oxford, United Kingdom. ²LTCI, Télécom Paris and Institut Polytechnique de Paris, France. Correspondence to: Carlo Alfano, Rui Yuan.

Abstract

Modern policy optimization methods in applied reinforcement learning are often inspired by the trust region policy optimization algorithm, which can be interpreted as a particular instance of policy mirror descent. While theoretical guarantees have been established for this framework, particularly in the tabular setting, the use of a general parametrization scheme remains mostly unjustified. In this work, we introduce a novel framework for policy optimization based on mirror descent that naturally accommodates general parametrizations. The policy class induced by our scheme recovers known classes, e.g. tabular softmax, log-linear, and neural policies. It also generates new ones, depending on the choice of the mirror map. For a general mirror map and parametrization function, we establish the quasi-monotonicity of the updates in value function, global linear convergence rates, and we bound the total variation of the algorithm along its path. To showcase the ability of our framework to accommodate general parametrization schemes, we present a case study involving shallow neural networks.

1. Introduction

Policy optimization represents one of the most widely-used classes of algorithms for reinforcement learning (RL). Among policy optimization techniques, policy gradient (PG) methods are gradient-based algorithms that optimize the policy over a parametrized policy class and have emerged as a popular class of algorithms for RL (Williams and Peng, 1991; Sutton et al., 1999; Konda and Tsitsiklis, 2000; Baxter and Bartlett, 2001).
The design of gradient-based policy updates has been key in achieving empirical successes in many settings, such as games (Berner et al., 2019) and autonomous driving (Shalev-Shwartz et al., 2016). In particular, a class of PG algorithms that has proven successful in practice consists in building updates that include a penalty term ensuring that the distance between the updated policy and the previous policy is bounded. Two main examples of algorithms belonging to this category are trust region policy optimization (TRPO) (Schulman et al., 2015a), which imposes a KL divergence constraint on its updates, and policy mirror descent (PMD) (Tomar et al., 2022; Xiao, 2022), which applies mirror descent (MD) (Nemirovski and Yudin, 1983; Beck and Teboulle, 2003) to RL and recovers TRPO as a special case.

From a theoretical perspective, motivated by the success of PMD in practice, there is now a concerted effort to develop convergence theories for PMD methods. For instance, it has been established that PMD converges linearly to the global optimum in the tabular setting by using geometrically increasing step-sizes (Lan, 2022; Xiao, 2022), by adding entropy regularization (Cen et al., 2021), and more generally by adding convex regularization (Zhan et al., 2021). The linear convergence of PMD has also been established for the negative entropy mirror map in the linear function approximation regime (Agarwal et al., 2020), either by adding entropy regularization (Cayci et al., 2021) or by using geometrically increasing step-sizes (Chen and Theja Maguluri, 2022; Yuan et al., 2022; Alfano and Rebeschini, 2022). The proofs of those results rely on specific policy parametrizations, namely tabular and log-linear, respectively. However, PMD remains mostly unjustified for general policy parametrizations, which leaves out important practical cases such as neural networks. In particular, it remains to be seen whether the theoretical results obtained for tabular policy classes transfer to this more general setting.

In this work, we provide an affirmative answer to that question by proposing a novel framework based on the MD algorithm which recovers PMD in the tabular setting, is capable of generating new algorithms, and is amenable to theoretical analysis for any parametrization class. Since the MD update can be viewed as a two-step procedure, that is, an update in the dual space followed by a mapping onto the probability simplex, our starting point is to define the policy class based on this second MD step. This policy class recovers the softmax policy class as a special case (Example 4.2) and accommodates any parametrization function, such as tabular, linear or neural network parametrizations. We then develop a new Approximate Mirror Policy Optimization (AMPO) framework for this policy class, based on the actor-critic family (Konda and Tsitsiklis, 2000) and on the mirror descent methodology. We illustrate the versatility of our framework by providing instances of our algorithm for different choices of mirror map (Examples 4.3, 4.4 and 4.5).

In addition, we provide a theoretical analysis of AMPO. The key point in our analysis is Lemma 5.1, which is an extension of the three-point descent lemma given in Chen and Teboulle (1993, Lemma 3.2).
Lemma 5.1 is established by exploiting the non-expansivity property of the Bregman projection to account for general parametrization functions. This result, together with the formulation of AMPO, permits us to keep track of the errors induced by our choice of policy class.

Building on Lemma 5.1, we establish theoretical guarantees for AMPO that hold for any parametrization class and any mirror map. More precisely, we show that our algorithm enjoys quasi-monotonic improvements (Proposition 5.2), sublinear convergence when the step-size is non-decreasing, and linear convergence when the step-size is increasing geometrically (Theorem 5.3). Additionally, we give a bound on the total Bregman divergence between consecutive policies along the path of the algorithm (Corollary 5.4), which sheds light on how the choice of mirror map plays a role in the regularization of the algorithm. To the best of our knowledge, AMPO is the first gradient-based algorithm with linear convergence that can accommodate any parametrization class and choice of mirror map. As such, it improves upon Agarwal et al. (2020) and Liu et al. (2019) by having a faster convergence rate, and upon Cayci et al. (2022) by avoiding the need for explicit regularization.

Lastly, we apply our results to the important case of shallow neural networks, and further theoretical derivations enable us to show that the error coming from our choice of policy class can be made arbitrarily small as a function of the width of the network (Theorem 5.5).

2. Related work

Lately, there has been a lot of work on providing theoretically sound algorithms for RL. In particular, there has been a lot of attention around algorithms inspired by mirror descent and, more specifically, by natural gradient descent. These two approaches have been applied to the tabular setting, in both regularized and unregularized MDPs, and have led to many developments (Agarwal et al., 2021; Xiao, 2022). Recent results include linear convergence to the optimal policy for several formulations and variations of these algorithms (Cen et al., 2021; Zhan et al., 2021; Khodadadian et al., 2021; Li et al., 2022; Lan, 2022; Bhandari and Russo, 2021; Mei et al., 2020; Grudzien et al., 2022).

Given the high computational and sample complexities of the tabular setting, researchers have tried to extend these results to general policy parametrizations, aiming at justifying the empirical success of parametrized policy classes. Most works in this area concern the application of neural networks or smooth parametrization functions to softmax policies and natural policy gradient (NPG) updates, and provide sublinear convergence rate guarantees (Agarwal et al., 2021; Liu et al., 2019; Cayci et al., 2021; 2022; Wang et al., 2020). Outside the NPG framework, Vaswani et al. (2022) have shown that their algorithm enjoys monotonic improvements for any mirror map, as long as smoothness and convexity assumptions on the surrogate objective are satisfied.

Our work fills the gap in the literature and provides a framework with linear convergence guarantees that can accommodate any parametrization function and generate new algorithms by selecting mirror maps.

3. Preliminaries

In this section, we first present the main RL setting before reviewing the mirror descent methodology.
3.1. RL setting

Let M = (S, A, P, r, γ, µ) be a discounted Markov Decision Process (MDP), where S is a possibly infinite state space, A is a finite action space, P(s′ | s, a) is the transition probability from state s to s′ under action a, r(s, a) ∈ [0, 1] is a reward function, γ is a discount factor, and µ is a starting state distribution. The behaviour of an agent on an MDP is then modelled by a policy π ∈ (∆(A))^S, where a ∼ π(· | s) is the density of the distribution over actions at state s ∈ S and ∆(A) is the probability simplex over A.

Given a policy π, let V^π : S → R denote the associated value function. Letting s_t and a_t be the state and action at time t, the value function V^π is defined as the expected discounted cumulative reward with starting state s_0 = s, namely,

V^π(s) := E[ Σ_{t=0}^∞ γ^t r(s_t, a_t) | π, s_0 = s ],   where a_t ∼ π(· | s_t) and s_{t+1} ∼ P(· | s_t, a_t).

Now letting V^π(µ) := E_{s∼µ}[V^π(s)], one of the main objectives in RL is for the agent to find an optimal policy

π⋆ ∈ argmax_{π∈(∆(A))^S} V^π(µ).   (1)

Similarly to the value function, for each pair (s, a) ∈ S × A, the state-action value function, or Q-function, associated to the policy π is defined as

Q^π(s, a) = E[ Σ_{t=0}^∞ γ^t r(s_t, a_t) | π, s_0 = s, a_0 = a ],

where a_t ∼ π(· | s_t) and s_{t+1} ∼ P(· | s_t, a_t). We also define the discounted state visitation distribution by

d^π_µ(s) = (1 − γ) E_{s_0∼µ}[ Σ_{t=0}^∞ γ^t P(s_t = s | π, s_0) ],

where P(s_t = s | π, s_0) represents the probability of the agent being in state s at time t when following policy π and starting from s_0. The distribution d^π_µ represents the time spent in state s when following policy π. Policy gradients refer to the gradients of the value function V^π(µ). The gradient of the value function with respect to the policy can be easily expressed by the policy gradient theorem (Sutton et al., 1999):

∂V^π(µ)/∂π(·|s) = E_{s∼d^π_µ}[Q^π(s, ·)].   (2)

We next introduce the mirror descent framework, which we will use to define and motivate a novel policy class.
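To make the quantities above concrete, the following minimal Monte Carlo sketch estimates Q^π(s, a) by truncated rollouts. The callables `policy`, `transition` and `reward` are hypothetical stand-ins for a generic simulator; they are not part of the paper.

```python
def estimate_q(s, a, policy, transition, reward, gamma=0.99, horizon=200, n_rollouts=100):
    """Monte Carlo estimate of Q^pi(s, a) via truncated rollouts of length `horizon`."""
    total = 0.0
    for _ in range(n_rollouts):
        state, action, ret, discount = s, a, 0.0, 1.0
        for _ in range(horizon):
            ret += discount * reward(state, action)
            discount *= gamma
            state = transition(state, action)   # sample s' ~ P(.|s, a)
            action = policy(state)              # sample a ~ pi(.|s)
        total += ret
    return total / n_rollouts
```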
3.2. Mirror descent

The first tools we recall from the MD framework are those of a mirror map and of a Bregman divergence (Bubeck, 2015, Chapter 4). Let Y ⊆ R^{|A|} be a convex set. A mirror map h : Y → R is a strictly convex, continuously differentiable and essentially smooth function (that is, lim_{x→∂Y} ‖∇h(x)‖_2 = +∞, where ∂Y denotes the boundary of Y) such that ∇h(Y) = R^{|A|}. The convex conjugate of h is denoted by h*, that is,

h*(x*) = sup_{x∈Y} ⟨x*, x⟩ − h(x),   x* ∈ R^{|A|}.

The map ∇h : Y → R^{|A|} allows us to map objects from the primal space Y to its dual space R^{|A|}, x ↦ ∇h(x), and vice versa for ∇h*, i.e. x* ↦ ∇h*(x*). In particular, from ∇h(Y) = R^{|A|}, we have, for all (x, x*) ∈ Y × R^{|A|},

x = ∇h*(∇h(x))   and   x* = ∇h(∇h*(x*)).   (3)

Furthermore, the mirror map h induces a Bregman divergence, defined as

D_h(x, y) = h(x) − h(y) − ⟨∇h(y), x − y⟩,

where D_h(x, y) ≥ 0 for all x, y ∈ Y.

We can now present the standard MD algorithm (Nemirovski and Yudin, 1983; Bubeck, 2015). Let X ⊆ Y be a convex set and V : X → R be a convex function. The MD algorithm can be formalized as the following iterative procedure for solving the minimization problem min_{x∈X} V(x): for all t ≥ 0,

y_{t+1} = ∇h(x_t) − η_t ∇V(x)|_{x=x_t},   (4)
x_{t+1} = Proj^h_X(∇h*(y_{t+1})),   (5)

where η_t is set according to a step-size schedule (η_t)_{t≥0} and Proj^h_X(·) is the Bregman projection

Proj^h_X(y) = argmin_{x∈X} D_h(x, y).   (6)

Precisely, at time t, x_t ∈ X is mapped to ∇h(x_t), from which one takes a gradient step (4). From (3), the resulting point is z_{t+1} = ∇h*(y_{t+1}) ∈ Y, such that ∇h(z_{t+1}) = y_{t+1}. Since z_{t+1} may not belong to X, one then takes a projection step (5).

In the next section, we explore how the MD setup we have just presented can be applied in the context of RL to define a novel parametrized policy class, alongside an efficient update rule. We will then show how this framework can be applied to different parametrization schemes, including neural networks as a special case.
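As a quick illustration of the two-step procedure (4)-(5), the sketch below runs MD on the simplex with the negative entropy mirror map, for which ∇h(x) = 1 + log x, ∇h*(y) = exp(y − 1) and the Bregman projection onto the simplex reduces to a normalization, so the update becomes the familiar exponentiated-gradient step. The quadratic objective is only a stand-in for a generic convex V.

```python
import numpy as np

def md_step_neg_entropy(x, grad, eta):
    """One mirror descent step (4)-(5) on the simplex with the negative entropy mirror map."""
    y = np.log(x) - eta * grad   # dual update; the "+1" in grad h cancels after normalization
    z = np.exp(y)                # back to the primal space via grad h*
    return z / z.sum()           # Bregman (KL) projection onto the simplex = normalization

# minimize V(x) = 0.5 * ||x - c||^2 over the simplex (a stand-in convex objective)
c = np.array([0.7, 0.2, 0.1, 0.0])
x = np.full(4, 0.25)
for t in range(200):
    x = md_step_neg_entropy(x, x - c, eta=0.5)
print(x)
```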
+Ideally, we would like to execute the exact MD-based +algorithm: for all t ≥ 0 and for all s ∈ S, +f t+1 +s += ∇h(πt +s) + ηt∇πsV π(µ) +�� +π=πt +(2) += ∇h(πt +s) + ηtEs∼dtµ[Qt +s], +(8) +πt+1 +s += Projh +∆(A)(∇h∗(ηtf t+1 +s +)). +(9) +Here, (9) reflects our Bregman projected policy class +4.1. Once f t+1 +s +is provided, πt+1 +s +can be obtained with +Algorithm 1: Approximate Mirror Policy Optimization +Input: Initial policy π0, mirror map h, +parametrization class FΘ, actor error εactor > 0, +critic C, number of iterations T , step-size +schedule (ηt)t≥0, and sequence of state-action +distributions (vt)t≥0. +1 for t = 0 to T − 1 do +2 +Obtain �Qt from the critic C. +3 +Find θt+1 ∈ Θ such that +∥f θt+1 − �Qt − η−1 +t +∇h(πt)∥2 +L2(vt) ≤ εactor. +4 +πt+1 +s += argmin +π′∈∆(A) +Dh(π′, ∇h∗(ηtf θt+1 +s +)), ∀s ∈ S. +a computational cost of �O(|A|) (Krichene et al., 2015). +However, we usually cannot perform the update (8) +exactly. In general, we do not have access to the exact +gradient of the value function and especially, when f θ +belongs to a parametrized class, there might not exist +any θt+1 ∈ Θ such that (8) is satisfied for all s ∈ S. +To remedy these issues, we propose Approximate Mir- +ror Policy Optimization (AMPO), described in Algo- +rithm 1, which lies within the actor-critic algorithmic +family (Konda and Tsitsiklis, 2000). More specifically, +at each time t, our algorithm involves a critic C evalu- +ating the policy and an actor which updates the policy, +following an approximate version of updates (8) and +(9). +• On the one hand, in line 2 the critic C returns a +function �Qt : S×A → R that estimates the Q-function +Qt associated to the current policy πt and serves as an +approximation of the gradient of the value function at +time t (Agarwal et al., 2020; Xiao, 2022), hence solving +the first issue. +Since in this work we focus on the +policy optimization side of RL, we will assume that +we have access to the critic C, yet keeping in mind +that C will typically estimate the Q-function through +rollout (Schulman et al., 2015b) or temporal difference +learning (Tesauro et al., 1995). +• On the other hand, in line 3 the actor returns a +function f t+1 ∈ FΘ, such that expected actor error is +bounded by εactor for a generic probability distribution +vt over S ×A. Line 3 is an instance of function approx- +imation where we try to approximate �Qt + η−1 +t +∇h(πt) +with f t+1 and which has been extensively studied +when f θ is a neural network (Ji et al., 2019). More gen- +erally, f t+1 is not constrained to a specific explicit up- +date. If f θ is differentiable w.r.t. θ, one can obtain f t+1 +by minimizing the expression in line 3 with gradient de- +scent methods. We can then readily use (9) to update + +A Novel Framework for PMD with General Parametrization and Linear Convergence +5 +the policy πt+1 within our defined policy class in line 4. +We +can +now +give +a +detailed +comparison +be- +tween AMPO and previous approximations of PMD +(Vaswani et al., 2022; Tomar et al., 2022). In both ap- +proaches, the algorithm provides an expression to opti- +mize. For AMPO this expression is the one in line 3 of +Algorithm 1, while, for instance, Tomar et al. (2022) +aim to maximize an expression that is similar to +πt+1 = argmin +π∈Π +E +s∼ρ[⟨∇πsV πt(µ) +�� +π=πt, πs⟩+Dh(πs, πt +s)], +(10) +where Π is certain policy class. When the policy class +Π is the entire policy space ∆(A)S, this is equivalent +to the two step procedure (8)-(9). 
We can now give a detailed comparison between AMPO and previous approximations of PMD (Vaswani et al., 2022; Tomar et al., 2022). In both approaches, the algorithm provides an expression to optimize. For AMPO this expression is the one in line 3 of Algorithm 1, while, for instance, Tomar et al. (2022) aim to optimize an expression that is similar to

π^{t+1} = argmin_{π∈Π} E_{s∼ρ}[ ⟨∇_{π_s} V^{π^t}(µ)|_{π=π^t}, π_s⟩ + D_h(π_s, π^t_s) ],   (10)

where Π is a certain policy class. When the policy class Π is the entire policy space ∆(A)^S, this is equivalent to the two-step procedure (8)-(9). However, in practice the parametrized policy class Π is non-convex, which prevents the application of standard mirror descent tools. On the contrary, AMPO side-steps this problem thanks to the definition of the Bregman projected policy class and the update in line 4 of Algorithm 1, as we will see in the theoretical analysis.

AMPO provides a flexible framework that accommodates any parametrization class F_Θ, as we highlight in the following examples, where we instantiate AMPO for specific choices of mirror map and show how it can recover existing approaches to policy optimization.

Example 4.3 (Projected Q-descent (Xiao, 2022)). If Y = R^{|A|} and h is the squared ℓ_2-norm, that is h(π_s) = ‖π_s‖²/2, line 3 in Algorithm 1 becomes

E_{(s,a)∼v_t}[ (f^{t+1}(s, a) − Q̂^t(s, a) − π^t(a|s))² ] ≤ ε_actor,

and the policy update is given for all s ∈ S by

π^{t+1}_s = Proj^{ℓ2}_{∆(A)}(f^{t+1}_s),   (11)

where Proj^{ℓ2}_{∆(A)} denotes the Euclidean projection onto the probability simplex. In the tabular setting, where S and A are finite and f^θ(s, a) = θ_{s,a}, ε_actor can be set to 0 (Agarwal et al., 2021) and (11) recovers the projected Q-descent algorithm (Xiao, 2022).

Example 4.4 (NPG). If h is the negative entropy mirror map used in Example 4.2, line 3 in Algorithm 1 becomes

‖ f^{t+1} − Q̂^t − (η_{t−1}/η_t) f^t ‖²_{L2(v_t)} ≤ ε_actor.   (12)

Consequently, based on Example 4.2, we have

π^{t+1}_s = exp(η_t f^{t+1}_s) / ‖exp(η_t f^{t+1}_s)‖_1,   (13)

for all s ∈ S. In this example, AMPO recovers tabular NPG (Shani et al., 2020) and NPG with log-linear policies (Hu et al., 2021) when f^θ(s, a) = θ_{s,a} and when f^θ and Q̂^t are linear functions for all t ≥ 0, respectively. We refer to Appendix A.1 for details and an extension to the Tsallis entropy.

We next present an example of a mirror map that has not yet been considered in the RL literature.

Example 4.5 (Hyperentropy mirror map). If Y = R^{|A|} and h_b is the hyperentropy mirror map with scale parameter b > 0, that is

h_b(π_s) = Σ_{a∈A} π(a|s) arcsinh(π(a|s)/b) − sqrt(π(a|s)² + b²),

the update in line 3 of Algorithm 1 with h = h_b becomes

‖ f^{t+1} − Q̂^t − (η_{t−1}/η_t)(f^t − arcsinh c_s) ‖²_{L2(v_t)} ≤ ε_actor,

and the policy satisfies

π^{t+1}_s = b sinh(f^{t+1}_s) sqrt(1 + c_s²) − b cosh(f^{t+1}_s) c_s,   for all s ∈ S,

where c_s ∈ R has an explicit expression for all times t. We refer to Appendix A.2 for more details.

Given the extensive literature on mirror maps (Orabona, 2020; Vaškevičius et al., 2020; Ghai et al., 2020), we expect our approach to pave the way for the use of mirror maps tailored to MDP structures. We leave this direction as future work, but outline in the example below how to instantiate AMPO for a class of mirror maps which includes the negative entropy and the hyperentropy as particular cases.

Example 4.6 (ω-potential mirror map). For a parametrization function f^t, let the mirror map h be defined as

h(π_s) = Σ_{a∈A} ∫_1^{π(a|s)} φ^{-1}(u) du,

where φ is an ω-potential, that is, for a ∈ (−∞, +∞] and ω ≤ 0, an increasing C¹-diffeomorphism φ : (−∞, a) → (ω, +∞) such that

lim_{u→−∞} φ(u) = ω,   lim_{u→a} φ(u) = +∞,   ∫_0^1 φ^{-1}(u) du < ∞.

Using a result by Krichene et al. (2015, Proposition 2), the policy π^t obtained by Algorithm 1 can be written as

π^t(a|s) = [φ(η_{t−1} f^t(s, a) + λ_s)]_+   for all s ∈ S, a ∈ A,

where λ_s ∈ R for all s ∈ S and [z]_+ = max(z, 0) for z ∈ R. While there are some cases where λ_s has an explicit expression, as we have shown in Examples 4.4 and 4.5, this does not appear possible for all mirror maps.
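When λ_s has no closed form, it can still be found numerically: the map λ ↦ Σ_a [φ(η f(s,a) + λ)]_+ is non-decreasing, so a one-dimensional bisection suffices. The sketch below is a minimal illustration, not the (more refined) algorithm of Krichene et al. (2015); the choice φ(u) = exp(u − 1) is only used to sanity-check the output against the softmax policy of Example 4.4.

```python
import numpy as np

def omega_potential_policy(f_s, eta, phi, lo=-50.0, hi=50.0, iters=100):
    """Compute pi(.|s) = [phi(eta * f_s + lam)]_+ with lam chosen so that the entries sum to 1."""
    def mass(lam):
        return np.maximum(phi(eta * f_s + lam), 0.0).sum()
    # bisection: mass(lam) is non-decreasing in lam (phi is increasing)
    for _ in range(iters):
        mid = 0.5 * (lo + hi)
        lo, hi = (mid, hi) if mass(mid) < 1.0 else (lo, mid)
    return np.maximum(phi(eta * f_s + 0.5 * (lo + hi)), 0.0)

f_s = np.array([1.0, 0.3, -0.5])
pi = omega_potential_policy(f_s, eta=2.0, phi=lambda u: np.exp(u - 1.0))
print(pi, pi.sum())   # matches softmax(2 * f_s) up to numerical tolerance
```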
5. Theoretical analysis

This section is devoted to the theoretical analysis of AMPO, which relies on the following lemma. For convenience, denote D^π_{π̄}(s) = D_h(π_s, π̄_s) for all s ∈ S.

Lemma 5.1. For any policies π and π̄, for any function f^θ ∈ F_Θ and for η > 0, we have, for all s ∈ S,

⟨η f^θ_s − ∇h(π̄_s), π_s − π̃_s⟩ ≤ D^π_{π̄}(s) − D^{π̃}_{π̄}(s) − D^π_{π̃}(s),

where π̃ = argmin_{π′∈∆(A)} D_h(π′, ∇h*(η f^θ_s)) is induced by f^θ and η according to Definition 4.1.

We present the proof of Lemma 5.1 in Appendix B.1. Lemma 5.1 describes a relationship between any two policies and a policy belonging to the Bregman projected policy class associated to F_Θ and h. While similar results have been obtained and exploited for the tabular setting (Xiao, 2022) and for the negative entropy mirror map (Liu et al., 2019; Hu et al., 2021), Lemma 5.1 is the first to allow any parametrization class F_Θ and any choice of mirror map. Since Lemma 5.1 does not depend on Algorithm 1, we expect it to be helpful in contexts outside this work.

Lemma 5.1 becomes useful when we set π̄ = π^t, f^θ = f^{t+1}, η = η_t, and π = π^t or π = π⋆. In particular, when η_t f^{t+1}_s − ∇h(π^t_s) ≈ η_t Q^π_s, Lemma 5.1 enables us to obtain telescoping sums and recursive relationships and to handle error terms efficiently, as we show in Appendix B. This is possible thanks to our two-step formulation, whereas Lemma 5.1 cannot be applied to algorithms based on the update in (10) (Tomar et al., 2022; Vaswani et al., 2022), due to the non-convexity of the optimization problem.

In the rest of the paper, we consider fixed but arbitrary the parametrization class F_Θ and the mirror map h.
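Lemma 5.1 is easy to check numerically for a given mirror map. The sketch below does so for the squared ℓ_2-norm of Example 4.3, where ∇h is the identity, D_h is half the squared Euclidean distance, and the Bregman projection is the usual Euclidean projection onto the simplex; the gap between the two sides equals ⟨π̃ − η f, π − π̃⟩, which is non-negative by the optimality condition of the projection.

```python
import numpy as np

def proj_simplex(v):
    """Euclidean projection of v onto the probability simplex (sort-based algorithm)."""
    u = np.sort(v)[::-1]
    css = np.cumsum(u)
    idx = np.arange(1, len(v) + 1)
    rho = np.max(idx[u + (1.0 - css) / idx > 0])
    theta = (1.0 - css[rho - 1]) / rho
    return np.maximum(v + theta, 0.0)

rng = np.random.default_rng(0)
d = lambda x, y: 0.5 * np.sum((x - y) ** 2)          # Bregman divergence of h = ||.||^2 / 2
for _ in range(1000):
    f = rng.normal(size=5)
    pi = rng.dirichlet(np.ones(5))                   # an arbitrary policy pi(.|s)
    pi_bar = rng.dirichlet(np.ones(5))               # the reference policy
    eta = rng.uniform(0.1, 5.0)
    pi_tilde = proj_simplex(eta * f)                 # the Bregman projected policy of Definition 4.1
    lhs = np.dot(eta * f - pi_bar, pi - pi_tilde)    # here grad h(pi_bar) = pi_bar
    rhs = d(pi, pi_bar) - d(pi_tilde, pi_bar) - d(pi, pi_tilde)
    assert lhs <= rhs + 1e-10
print("Lemma 5.1 verified on 1000 random instances (l2 mirror map).")
```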
5.1. General policy parametrization

We show that AMPO enjoys (i) quasi-monotonic improvements, (ii) sublinear or linear convergence depending on the step-size schedule, and (iii) an upper bound on the total variation between updates of the algorithm. The first step is to control the two approximation errors appearing in Algorithm 1, through Assumptions (A1) and (A2) below.

(A1) (Critic error). There exists ε_critic ≥ 0 such that, for all times t ≥ 0, the critic C returns a function Q̂^t that satisfies

‖Q^t − Q̂^t‖²_{L2(ρ_t)} ≤ ε_critic,

where (ρ_t)_{t≥0} is a sequence of distributions over states and actions.

(A2) (Actor error). There exists ε_actor ≥ 0 such that, for all times t ≥ 0, the actor returns a function f^{t+1} that satisfies

‖f^{t+1} − Q̂^t − η_t^{-1} ∇h(π^t)‖²_{L2(v_t)} ≤ ε_actor,

where (v_t)_{t≥0} is a sequence of distributions over states and actions.

Assumption (A1) requires the critic to provide an estimate of the Q-function with a bounded expected squared error, where the expectation is taken over some arbitrary distribution. Several works focus on designing methods that provide such estimates (Schulman et al., 2015b). It is weaker than the standard critic error assumption in the RL literature (Agarwal et al., 2020; Cayci et al., 2021), as ρ_t can be any distribution, not necessarily dependent on π^t.

Assumption (A2) ensures that the update in line 3 of Algorithm 1 holds, so that AMPO is well defined. We will in particular show later in the paper that, when F_Θ is a class of shallow neural networks, it is always possible to find f^{t+1} such that Assumption (A2) holds, provided that a modulus of continuity condition is respected.

We highlight that in both assumptions the distributions ρ_t and v_t do not depend on the current policy π^t for all times t ≥ 0. Therefore, Assumptions (A1) and (A2) allow off-policy policy evaluation techniques (Thomas and Brunskill, 2016) and policy updates. To quantify how the choice of these distributions affects the error terms in the convergence rates, we introduce the following two coefficients.

(A3) (Concentrability coefficients). There exist C_1 ≥ 0 and C_2 ≥ 0 such that, for all times t, the distributions ρ_t and v_t satisfy

E_{(s,a)∼ρ_t}[ (d^π_µ(s) π(a|s) / ρ_t(s, a))² ] ≤ C_1   and   E_{(s,a)∼v_t}[ (d^π_µ(s) π(a|s) / v_t(s, a))² ] ≤ C_2,

whenever (d^π_µ, π) is either (d^⋆_µ, π⋆), (d^{t+1}_µ, π^{t+1}), (d^⋆_µ, π^t) or (d^{t+1}_µ, π^t).

The concentrability coefficients C_1 and C_2 describe how much the distributions ρ_t and v_t overlap with the distributions (d^⋆_µ, π⋆), (d^{t+1}_µ, π^{t+1}), (d^⋆_µ, π^t) and (d^{t+1}_µ, π^t). Our (A3) is weaker than the previously best known concentrability coefficients in Yuan et al. (2022, Assumption 9), in the sense that we have full control of (ρ_t, v_t). See Appendix B.7 for more discussion on the concentrability coefficients.
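In a small tabular problem the coefficients of (A3) can be computed exactly, which makes the definition concrete: C_1 is just the second moment, under ρ_t, of the likelihood ratio between the target state-action distribution d^π_µ(s)π(a|s) and the sampling distribution ρ_t. A minimal sketch with made-up distributions follows.

```python
import numpy as np

def concentrability(d_target, pi_target, rho):
    """Second moment of the likelihood ratio in (A3): E_{(s,a)~rho}[(d(s)pi(a|s)/rho(s,a))^2]."""
    target = d_target[:, None] * pi_target          # joint distribution d(s) * pi(a|s), shape (S, A)
    ratio = target / rho
    return np.sum(rho * ratio ** 2)

d_star = np.array([0.5, 0.3, 0.2])                  # a hypothetical d^*_mu
pi_star = np.array([[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]])
rho = np.full((3, 2), 1.0 / 6.0)                    # uniform sampling distribution
print(concentrability(d_star, pi_star, rho))        # the bound C_1 for the pair (d^*_mu, pi^*)
```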
+We emphasize how the choice of the mirror map h in +the definition of Algorithm 1 influences these results +by way of a Bregman divergence term. The expected +Bregman divergence D⋆ +0 between the optimal and the +starting policy appears explicitly in statements of The- +orem 5.3 and Corollary 5.4, which motivates to find a +starting policy π0 and a mirror map h such that D⋆ +0 +is small. Additionally, the statement of Corollary 5.4 +has different meanings for different mirror maps. We +compare, for instance, the ℓ2-norm and the entropy +mirror map, which induce, respectively, the Euclidean +distance and the KL divergence. Since +∥πs − ¯πs∥2 +2 ≤ ∥πs − ¯πs∥2 +1 ≤ 2KL(πs, ¯πs), +the ℓ2-norm will induce an algorithmic path with a +larger total variation distance than the one induced by +entropy mirror map, for the same step-size schedule. +In the next section, we show that εactor can be made +arbitrarily small when f θ is parametrized by a shallow +neural network. +5.2. Neural network parametrization +Neural +networks +are +a +popular +choice +for +the +parametrization function f θ due to their empirical suc- +cesses in RL applications. Yet, few theoretical guar- +antees exist for this parametrization and we there- +fore examine here how we can use our framework to +build novel theoretical results. We will consider the +case where, for each action a ∈ A, f θ(·, a) belongs to +the family of shallow ReLU networks, which has been +shown to be a universal approximator (Jacot et al., +2018; Allen-Zhu et al., 2019; Du et al., 2019; Ji et al., +2019) and is defined, for s ∈ S ⊆ Rn and a ∈ A, as +f θ(·, a) : s �→ +m +� +j=1 +xa +j σ(⟨wa +j , s⟩ + ba +j ), +where σ(y) = max(y, 0) for all y ∈ R, wa +j ∈ Rn and +ba +j , xa +j ∈ R for all j = 1, . . . , m, with m ∈ N+. +In the context of Algorithm 1, we want to show that +εactor can be made arbitrarily small by choosing a wide +enough shallow ReLU network for the parametrization +of f θ(·, a), for all a ∈ A. In other words, at all times +t, we want to find an approximation f t+1 of gt := +�Qt − η−1 +t +∇h(πt). +We achieve this by extending the +framework developed by Ji et al. (2019) to our setting. +In order to do so, we first introduce the modulus of +continuity ωgt for function gt, which is defined as +ωgt(δ) := sup{gt(s, a) − gt(s′, a) : a ∈ A +max(∥s∥2 , ∥s′∥2) ≤ 1 + δ, ∥s − s′∥2 ≤ δ}, +Moreover, given a signed density p : Rn+1 → R , we +define a sample from p as (w, b, x), where (w, b) is sam- +pled from |p|/ ∥p∥1 and x = sign(p(w, b)). +We can +now state the following result, which gives an explicit +bound on εactor. Denote ∥p∥L1 = +� +|p(w)|dw and with- +out loss of generality, assume that ∥s∥2 ≤ 1 for all +s ∈ S. We then have the following theorem, which is +adapted from Ji et al. (2019, Theorem E.1) +Theorem 5.5. There exist a set of signed densities +(pa)a∈A and a set of parameters ((xa +j , wa +j , ba +j))a∈A for +j ∈ {m + 1, m + 2, m + 3} such that, if +f t+1(s, a) = 1 +m +m+3 +� +j=1 +xa +j σ(⟨wa +j , s⟩ + ba +j ), +where ((xa +j , wa +j , ba +j ))m +j=1 are sampled from pa, for all +a ∈ A, with probability at least 1 − 3λ we have: for all +times 0 ≤ t < T , +√εactor ≤ 3 max +t 1, +where x ∈ R and [z]+ = max(z, 0), and +logq(y) = +� +log(y) +if q = 1, +yq−1−1 +q−1 +if q > 1, +where y ≥ 0. Then the Tsallis entropy mirror map is defined as +hq(πs) = 1 +q +�� +π(a|s) logq(π(a|s)) − π(a|s) +� +. +To solve the minimization problem +πθ +s = argmin +πs∈∆(A) +Dhq(πs, ∇h∗ +q(ηt−1f t +s)) +we use the KKT conditions. 
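Before writing out the KKT system, we note that the projected policy obtained from it — derived in closed form just below as π^t(a|s) = exp_q(η_{t−1} f^t(s, a) − λ_s), with λ_s fixed by the normalization constraint via the intermediate value theorem — is straightforward to compute numerically. The following is a hedged sketch (not from the original text) assuming q ≥ 1; the bracketing-plus-bisection search for λ_s is an illustrative numerical choice, and at q = 1 the construction recovers the softmax with λ_s = log‖exp(η f)‖₁.

```python
import numpy as np

def exp_q(x, q):
    """q-exponential: exp(x) for q = 1, [1 + (q-1)x]_+^{1/(q-1)} for q > 1."""
    if q == 1.0:
        return np.exp(x)
    return np.maximum(1.0 + (q - 1.0) * x, 0.0) ** (1.0 / (q - 1.0))

def tsallis_projection(f_s, eta, q, tol=1e-12):
    """pi(a|s) = exp_q(eta * f_s[a] - lam), with lam chosen so the entries sum to 1.
    The total is nonincreasing in lam (it tends to +inf as lam -> -inf and to 0 as
    lam -> +inf), so a bracket plus bisection finds the unique normalizing lam."""
    def total(lam):
        return exp_q(eta * f_s - lam, q).sum()
    lo, hi = -1.0, 1.0
    while total(lo) < 1.0:      # push lo left until total(lo) >= 1
        lo *= 2.0
    while total(hi) > 1.0:      # push hi right until total(hi) <= 1
        hi *= 2.0
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        lo, hi = (mid, hi) if total(mid) >= 1.0 else (lo, mid)
    return exp_q(eta * f_s - 0.5 * (lo + hi), q)

f_s = np.array([1.0, 0.2, -0.5])        # illustrative scores f^t(s, ·)
for q in (1.0, 1.5, 2.0):
    pi = tsallis_projection(f_s, eta=1.0, q=q)
    assert abs(pi.sum() - 1.0) < 1e-8 and (pi >= 0).all()
```

Note that for q > 1 the q-exponential can clip low-scoring actions to exactly zero probability, which is one practical difference from the softmax case.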
We formalize it as +argmin +πs∈R|A| +argmin +πs∈∆(A) +Dhq(πs, ∇h∗ +q(ηt−1f t +s)) +subject to ⟨πs, 1⟩ = 1 +π(a|s) ≥ 0 +∀ a ∈ A. +The conditions then become +(stationarity) +logq(πs) − ηt−1f t +s + λs1 − +� +a∈A +ca +sea = 0, +(complementary slackness) +ca +sπ(a|s) = 0 +∀ a ∈ A, +(primal feasibility) +⟨πs, 1⟩ = 1, π(a|s) ≥ 0 +∀ a ∈ A, +(dual feasibility) +ca +s ≥ 0 +∀ a ∈ A, + +A Novel Framework for PMD with General Parametrization and Linear Convergence +13 +where logq(πs) is applied element-wise, λs and (ca +s)a∈A are the dual variables and ea is a vector of all 0 with a +1 in the position associated to a, for all a ∈ A. If we set ca +s = 0 for all a ∈ A, the complementary slackness and +dual feasibility conditions are satisfied and, from the stationarity condition, we obtain that +πt(a|s) = expq(ηt−1f t(s, a) − λs). +Since � +a∈A expq(ηt−1f t(s, a) − λs) converges to 0 and +∞ when λs goes to +∞ and −∞, respectively, there +always exists a λs such that the primal feasibility condition is satisfied, thanks to the intermediate value theorem. +When q = 1, λs = log ∥exp(ηt−1f t +s)∥1. +A.2. Example 4.5 +Let hb be the hyperentropy mirror map with scale parameter b > 0, that is +hb(πs) = +� +a∈A +π(a|s) arcsinh(π(a|s)/b) − +� +π(a|s)2 + b2. +To solve the minimization problem +πθ +s = argmin +πs∈∆(A) +Dhb(πs, ∇h∗ +b(ηt−1f t +s)) +we use the KKT conditions. We formalize it as +argmin +πs∈R|A| +argmin +πs∈∆(A) +Dhb(πs, ∇h∗ +b(ηt−1f t +s)) +subject to ⟨πs, 1⟩ = 1 +π(a|s) ≥ 0 +∀ a ∈ A. +The conditions then become +(stationarity) +arcsinh(πs/b) − ηt−1f t +s + λs1 − +� +a∈A +ca +sea = 0, +(complementary slackness) +ca +sπ(a|s) = 0 +∀ a ∈ A, +(primal feasibility) +⟨πs, 1⟩ = 1, π(a|s) ≥ 0 +∀ a ∈ A, +(dual feasibility) +ca +s ≥ 0 +∀ a ∈ A, +where arcsinh(πs/b) is applied element-wise, λs and (ca +s)a∈A are the dual variables and ea is a vector of all 0 with +a 1 in the position associated to a, for all a ∈ A. If we set ca +s = 0 for all a ∈ A, the complementary slackness +and dual feasibility conditions are satisfied and, from the stationarity condition, we obtain that +πt(a|s) = b sinh(ηt−1f t(s, a) − λs) += b sinh(ηt−1f t(s, a)) +� +1 + sinh2(λs) − b cosh(ηt−1f t(s, a)) sinh(λs). +It remains to satisfy the primal feasibility condition, that is find λs such that +b +� +a∈A +sinh(ηt−1f t(s, a) − λs) = 1. +Using properties of the hyperbolic sine, we have the equivalent expression +� +a∈A +sinh(ηt−1f t(s, a)) +� +1 + sinh2(λ) − cosh(ηt−1f t(s, a)) sinh(λs) = 1/b. +Solving for sinh(λs), we obtain +sinh(λs) = β ± +� +β2 + (β2 − α2)(α2 − 1) +β2 − α2 +, +where α = b � +a∈A sinh(ηt−1f t(s, a)) and β = b � +a∈A cosh(ηt−1f t(s, a)). Since cosh(x) ≥ sinh(x) for any x ∈ R, +at least one of the two solutions satisfies the primal feasibility condition. + +A Novel Framework for PMD with General Parametrization and Linear Convergence +14 +B. Deferred proofs from Section 5 +B.1. Proof of the variant of the three-point descent lemma – Lemma 5.1 +Here we provide the proof of Lemma 5.1, the variant of the three-point descent lemma with the integration of +an arbitrary parameterized function, which is the key ingredient for our AMPO analysis. It is a variation of +both Xiao (2022, Lemma 6) and Chen and Teboulle (1993, Lemma 3.2). First, we recall some technical conditions +of the mirror map (Bubeck, 2015, Chapter 4). +Suppose that Y ⊂ R|A| is a closed convex set, we say a function h : Y → R is a mirror map if it satisfies the +following properties: +(i) h is strictly convex and differentiable. 
+(ii) h is essentially smooth, i.e., the graident of h diverges on the boundary of Y, that is lim +x→∂Y ∥∇h(x)∥ → ∞. +(iii) The gradient of h takes all possible values, that is ∇h(Y) = R|A|. +To prove Lemma 5.1, we also need the following rather simple property satisfied by the Bregman divergence. We +provide its proof for self-containment. +Lemma B.1 (Three-point identity, Lemma 3.1 in Chen and Teboulle (1993)). Let h be a mirror map. Then for +any a, b in the relative interior of Y and c ∈ Y, the following identity holds: +Dh(c, a) + Dh(a, b) − Dh(c, b) = ⟨∇h(b) − ∇h(a), c − a⟩ . +(15) +Proof. Indeed, using the definition of the Bregman divergence Dh, we have +⟨∇h(a), c − a⟩ = h(c) − h(a) − Dh(c, a), +(16) +⟨∇h(b), a − b⟩ = h(a) − h(b) − Dh(a, b), +(17) +⟨∇h(b), c − b⟩ = h(c) − h(b) − Dh(c, b). +(18) +Subtracting (16) and (17) from (18) yields (15). +Now we are ready to prove Lemma 5.1. +Lemma B.2 (Lemma 5.1). Let Y ⊂ R|A| be a closed convex set with ∆(A) ⊆ Y. For any policies π ∈ ∆(A) +and ¯π in the relative interior of ∆(A), any function f θ with θ ∈ Θ, any s ∈ S and for η > 0, we have that, +⟨ηf θ +s − ∇h(¯πs), πs − ˜πs⟩ ≤ Dh(πs, ¯πs) − Dh(˜πs, ¯πs) − Dh(π, ˜πs), +where ˜π is induced by f θ and η according to Definition 4.1, that is, for all s ∈ S, +˜πs = Projh +∆(A) +� +∇h∗(ηf θ +s ) +� += argmin +π′∈∆(A) +Dh(π′ +s, ∇h∗(ηf θ +s )). +(19) +Proof. Since ∇h(Y) = R|A|, for every θ ∈ Θ, there exists p ∈ YS such that for every s ∈ S, we have ∇h(ps) = ηf θ +s +with ps ∈ Y. +So by using the property ∇∗(∇h(·)) = id(·) with id the identity function, (19) can be rewritten as, for all s ∈ S, +˜πs = +argmin +π′∈(∆(A))S Dh(π′ +s, ps) = Projh +∆(A)(ps). +Now plugging a = ¯πs, b = ps and c = πs in the three-point identity lemma B.1, we obtain +Dh(πs, ¯πs) − Dh(πs, ps) + Dh(¯πs, ps) = ⟨∇h(¯πs) − ∇h(ps), ¯πs − πs⟩ . +(20) + +A Novel Framework for PMD with General Parametrization and Linear Convergence +15 +Similarly, plugging a = ¯πs, b = ps and c = ˜πs in the three-point identity lemma B.1, we obtain +Dh(˜πs, ¯πs) − Dh(˜πs, ps) + Dh(¯πs, ps) = ⟨∇h(¯πs) − ∇h(ps), ¯πs − ˜πs⟩ . +(21) +From (20), we have +Dh(πs, ¯πs) − Dh(πs, ps) + Dh(¯πs, ps) += +⟨∇h(¯πs) − ∇h(ps), ¯πs − πs⟩ += +⟨∇h(¯πs) − ∇h(ps), ¯πs − ˜πs⟩ + ⟨∇h(¯πs) − ∇h(ps), ˜πs − πs⟩ +(21) += +Dh(˜πs, ¯πs) − Dh(˜πs, ps) + Dh(¯πs, ps) + ⟨∇h(¯πs) − ∇h(ps), ˜πs − πs⟩ . +By rearranging terms, we have +Dh(πs, ¯πs) − Dh(˜πs, ¯πs) − Dh(πs, ps) + Dh(˜πs, ps) = ⟨∇h(¯πs) − ∇h(ps), ˜πs − πs⟩ . +(22) +Besides, from the Generalized Pythagorean Theorem of the Bregman divergence (proofs may be found in Bubeck +(2015, Lemma 4.1)) and the fact that ˜πs = Projh +∆(A)(ps), we know that +Dh +� +πs, Projh +∆(A)(ps) +� ++ Dh +� +Projh +∆(A)(ps), ps +� +≤ Dh(πs, ps), +that is +Dh(πs, ˜πs) + Dh(˜πs, ps) ≤ Dh(πs, ps) +⇐⇒ +−Dh(πs, ps) + Dh(˜πs, ps) ≤ −Dh(πs, ˜πs). +Plugging the above inequality into the left hand side of (22) yields +Dh(πs, ¯πs) − Dh(˜πs, ¯πs) − Dh(πs, ˜πs) ≥ ⟨∇h(¯πs) − ∇h(ps), ˜πs − πs⟩ , +which concludes the proof with ∇h(ps) = ηf θ +s . +B.2. Bounding errors +In this section, we will bound error terms of the type +Es∼dπ +µ,a∼πs +� +Qt(s, a) + η−1 +t +∇h(πt +s)(a) − f t+1(s, a) +� +, +(23) +where (dπ +µ, π) ∈ {(d⋆ +µ, π⋆), (dt+1 +µ +, πt+1), (d⋆ +µ, πt), (dt+1 +µ +, πt)} . These error terms will appear in the fourthcoming +proofs of our theorems. They directly induce the error floors of our convergence results. +Since ∇h(Y) = R|A|, at time t + 1, there exists pt+1 ∈ YS such that for every s ∈ S, ∇h(pt+1 +s +) = f t+1 +s +. 
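As a quick numerical sanity check of the three-point identity (15) — an added illustration, not part of the proof — one can evaluate both sides for any strictly convex differentiable mirror map and arbitrary interior points of the simplex. The sketch below uses the negative-entropy mirror map and random Dirichlet points purely for concreteness.

```python
import numpy as np

def h(x):                       # negative entropy as the mirror map
    return np.sum(x * np.log(x))

def grad_h(x):
    return 1.0 + np.log(x)

def bregman(x, y):              # D_h(x, y) = h(x) - h(y) - <grad_h(y), x - y>
    return h(x) - h(y) - grad_h(y) @ (x - y)

rng = np.random.default_rng(0)
a, b, c = (rng.dirichlet(np.ones(4)) for _ in range(3))   # interior points of the simplex

lhs = bregman(c, a) + bregman(a, b) - bregman(c, b)
rhs = (grad_h(b) - grad_h(a)) @ (c - a)
assert np.isclose(lhs, rhs)     # three-point identity (15)
```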
+In the rest of Appendix B, let qt : S × A → R such that, for every s ∈ S, +qt +s := η−1 +t +(∇h(pt+1 +s +) − ∇h(πt +s)) = f t+1 +s +− η−1 +t +∇h(πt +s) ∈ R|A|. +So (23) can be rewritten as +Es∼dπ +µ,a∼πs +� +Qt(s, a) + η−1 +t +[∇h(πt +s)]a − f t+1(s, a) +� += Es∼dπ +µ,a∼πs +� +Qt(s, a) − qt(s, a) +� +. +(24) +To bound it, we separate the errors in critic and actor errors. That is, +Es∼dπ +µ,a∼πs +� +Qt(s, a) − qt(s, a) +� += Es∼dπ +µ,a∼πs +� +Qt(s, a) − �Qt(s, a) +� +� +�� +� +Error for the critic ++ Es∼dπ +µ,a∼πs +� +�Qt(s, a) − qt(s, a) +� +� +�� +� +Error for the actor +. + +A Novel Framework for PMD with General Parametrization and Linear Convergence +16 +Critic error. +The critic error measures how accurate the estimation �Qt of the Q-function Qt at time t. +To bound it, let (ρt)t≥0 be a sequence of distributions over states and actions. By using Cauchy-Schwartz’s +inequality, we have +Es∼dπ +µ,a∼πs +� +Qt(s, a) − �Qt(s, a) +� += +� +s∈S,a∈A +dπ +µ(s)π(a | s) +� +ρt(s, a) +· +� +ρt(s, a)(Qt(s, a) − �Qt(s, a)) +≤ +� +� +� +� +� +s∈S,a∈A +� +dπµ(s)π(a | s) +�2 +ρt(s, a) +· +� +s∈S,a∈A +ρt(s, a)(Qt(s, a) − �Qt(s, a))2 += +� +� +� +�E(s,a)∼ρt +��dπµ(s)π(a | s) +ρt(s, a) +�2� +· E(s,a)∼ρt +� +(Qt(s, a) − �Qt(s, a))2 +� +≤ +� +C1εcritic, +(25) +where the last line is obtained by Assumptions (A3) and (A1). +Actor error. +The actor error evaluates how well the regression solver for θt+1 performs at time t. +To bound it, let (vt)t≥0 be a sequence of distributions over states and actions. Similarly, by using Cauchy- +Schwartz’s inequality, we have +Es∼dπ +µ,a∼πs +� +�Qt(s, a) − qt(s, a) +� += +� +s∈S,a∈A +dπ +µ(s)π(a | s) +� +vt(s, a) +· +� +vt(s, a)( �Qt(s, a) − qt(s, a)) +≤ +� +� +� +� +� +s∈S,a∈A +� +dπµ(s)π(a | s) +�2 +vt(s, a) +· +� +s∈S,a∈A +vt(s, a)( �Qt(s, a) − qt(s, a))2 += +� +� +� +�E(s,a)∼vt +��dπµ(s)π(a | s) +vt(s, a) +�2� +· E(s,a)∼vt +� +( �Qt(s, a) − qt(s, a))2 +� +≤ +� +C2εactor, +(26) +where the last line is obtained by Assumptions (A3) and (A2). +Plugging (25) and (26) into (24) yields the bound +���Es∼dπ +µ,a∼πs +� +Qt(s, a) − qt(s, a) +���� ≤ +� +C1εcritic + +� +C2εactor, +(27) +where (dπ +µ, π) ∈ {(d⋆ +µ, π⋆), (dt+1 +µ +, πt+1), (d⋆ +µ, πt), (dt+1 +µ +, πt)}. +B.3. Quasi-monotonic updates – Proof of Proposition 5.2 +In this section, we show that the AMPO updates guarantee a quasi-monotonic property, i.e. a non-decreasing +property up to certain error floors duo to the actor and the critic errors., which allow us to establish an important +recursion about the AMPO method. First, we recall the performance difference lemma (Kakade and Langford, +2002) which is the second key ingredient for our AMPO analysis. +It is also a well known result in the RL +litterature. Here we use a particular form of the lemma presented in Yuan et al. (2022, Lemma 3). +Lemma B.3 (Performance difference lemma, Lemma 3 in (Yuan et al., 2022)). For any policy π, π′ ∈ ∆(A)S +and µ ∈ ∆(S), +V π(µ) − V π′(µ) = +1 +1 − γ Es∼dπ +µ +�� +Qπ′ +s , πs − π′ +s +�� +. +Recall the notation +τ := +2 +1 − γ ( +� +C1εcritic + +� +C2εactor). + +A Novel Framework for PMD with General Parametrization and Linear Convergence +17 +The following result characterizes the non-decreasing property of AMPO. The bounding errors (27) in the previous +appendix B.2 will be used to prove the lemma. It is a slightly stronger result than Proposition 5.2. +Lemma B.4. Consider the iterates of Algorithm 1, at each time t ≥ 0, we have +V t+1(µ) − V t(µ) ≥ Es∼dt+1 +µ +�Dh(πt+1 +s +, πt +s) + Dh(πt +s, πt+1 +s +) +ηt(1 − γ) +� +− τ. 
+Proof. Using the variant of the three-point descent lemma 5.1 with ¯π = πt, f θ = f t+1, η = ηt, thus ˜π = πt+1 by +Definition 4.1 and Algorithm 1, and πs = πt +s, we have +⟨ηtqt +s, πt +s − πt+1 +s +⟩ ≤ Dh(πt +s, πt +s) − Dh(πt+1 +s +, πt +s) − Dh(πt +s, πt+1 +s +). +(28) +By rearranging terms and Dh(πt +s, πt +s) = 0, we have +⟨ηtqt +s, πt+1 +s +− πt +s⟩ ≥ Dh(πt+1 +s +, πt +s) + Dh(πt +s, πt+1 +s +) ≥ 0. +(29) +Then, by the performance difference lemma B.3, we have +(1 − γ)(V t+1(µ) − V t(µ)) += +Es∼dt+1 +µ +� +⟨Qt +s, πt+1 +s +− πt +s⟩ +� += +Es∼dt+1 +µ +� +⟨qt +s, πt+1 +s +− πt +s⟩ +� ++ Es∼dt+1 +µ +� +⟨Qt +s − qt +s, πt+1 +s +− πt +s⟩ +� +(28) +≥ +Es∼dt+1 +µ +�Dh(πt+1 +s +, πt +s) + Dh(πt +s, πt+1 +s +) +ηt +� +− +���Es∼dt+1 +µ +� +⟨Qt +s − qt +s, πt+1 +s +− πt +s⟩ +���� +≥ +Es∼dt+1 +µ +�Dh(πt+1 +s +, πt +s) + Dh(πt +s, πt+1 +s +) +ηt +� +− τ(1 − γ), +which is able to conclude the proof by dividing both side by (1−γ) . Indeed, the last line is obtained by bounding +���Es∼dt+1 +µ +� +⟨Qt +s − qt +s, πt+1 +s +− πt +s⟩ +���� through the following result +���Es∼dt+1 +µ +� +⟨Qt +s − qt +s, πt+1 +s +− πt +s⟩ +���� +≤ +���Es∼dt+1 +µ +,a∼πt+1 +s +� +Qt(s, a) − qt(s, a) +���� + +���Es∼dt+1 +µ +,a∼πts +� +Qt(s, a) − qt(s, a) +���� +(27) +≤ +2( +� +C1εcritic + +� +C2εactor) = τ(1 − γ), +(30) +where the first term is upper bounded by √C1εcritic + √C2εactor through (27) with (dπ +µ, π) = (dt+1 +µ +, πt+1), +and the second term is also upper bounded by √C1εcritic + √C2εactor through (27) with (dπ +µ, π) = (dt+1 +µ +, πt), +respectively. +B.4. Main passage – An important recursion about the AMPO method +In this section, we show an important recursion result for the AMPO updates, which will be used for both the +sublinear and the linear convergence analysis of AMPO. +To simplify proofs in the rest of Appendix B, let +νt := +���� +d⋆ +µ +dt+1 +µ +���� +∞ +:= max +s∈S +d⋆ +µ(s) +dt+1 +µ +(s). +For two different time t, t′ ≥ 0, let Dt +t′ denote the expected Bregman divergence between the policy πt and policy +πt′, where the expectation is taken over the discounted state visitation distribution of the optimal policy d⋆ +µ, that +is, +Dt +t′ := Es∼d⋆µ +� +Dh(πt +s, πt′ +s ) +� +. +Similarly, let D⋆ +t denote the expected Bregman divergence between the optimal policy π⋆ and πt, that is, +D⋆ +t := Es∼d⋆ +µ +� +Dh(π⋆ +s, πt +s) +� +. + +A Novel Framework for PMD with General Parametrization and Linear Convergence +18 +Let ∆t := V ⋆(µ) − V t(µ) be the optimality gap. +We can now state the following important recursion result for the AMPO method. +Proposition B.5. Consider the iterates of Algorithm 1, at each time t ≥ 0, we have +Dt+1 +t +(1 − γ)ηt ++ C3 (∆t+1 − ∆t) + ∆t ≤ +D⋆ +t +(1 − γ)ηt +− +D⋆ +t+1 +(1 − γ)ηt ++ (1 + C3)τ. +Proof. Using the three-point descent lemma 5.1 with ¯π = πt, f θ = f t+1, η = ηt, and thus ˜π = πt+1 by Definition +4.1 and Algorithm 1, and πs = π⋆ +s, we have that +⟨ηtqt +s, π⋆ +s − πt+1 +s +⟩ ≤ Dh(π⋆, πt) − Dh(π⋆, πt+1) − Dh(πt+1, πt), +which can be decomposed by +⟨ηtqt +s, πt +s − πt+1 +s +⟩ + ⟨ηtqt +s, π⋆ +s − πt +s⟩ ≤ Dh(π⋆, πt) − Dh(π⋆, πt+1) − Dh(πt+1, πt). +Taking expectation with respect to the distribution d⋆ +µ over states and dividing both side by ηt, we have +Es∼d⋆ +µ +� +⟨qt +s, πt +s − πt+1 +s +⟩ +� ++ Es∼d⋆ +µ +� +⟨qt +s, π⋆ +s − πt +s⟩ +� +≤ 1 +ηt +(D⋆ +t − D⋆ +t+1 − Dt+1 +t +). +(31) +We lower bound the two terms on the left hand side of (31) separately. 
+For the first one, we have that +Es∼d⋆µ +� +⟨qt +s, πt +s − πt+1 +s +⟩ +� +(29) +≥ +���� +d⋆ +µ +dt+1 +µ +���� +∞ +Es∼dt+1 +µ +� +⟨qt +s, πt +s − πt+1 +s +⟩ +� += +νt+1Es∼dt+1 +µ +� +⟨Qt +s, πt +s − πt+1 +s +⟩ +� ++ νt+1Es∼dt+1 +µ +� +⟨qt +s − Qt +s, πt +s − πt+1 +s +⟩ +� +Lemma B.3 += +νt+1(1 − γ) +� +V t(µ) − V t+1(µ) +� ++ νt+1Es∼dt+1 +µ +� +⟨qt +s − Qt +s, πt +s − πt+1 +s +⟩ +� +(30) +≥ +νt+1(1 − γ) +� +V t(µ) − V t+1(µ) +� +− νt+1τ(1 − γ) += +νt+1(1 − γ) (∆t+1 − ∆t) − νt+1τ(1 − γ). +For the second one, we have that +Es∼d⋆ +µ +� +⟨qt +s, π⋆ +s − πt +s⟩ +� += +Es∼d⋆ +µ +� +⟨Qt +s, π⋆ +s − πt +s⟩ +� ++ Es∼d⋆ +µ +� +⟨qt +s − Qt +s, π⋆ +s − πt +s⟩ +� +Lemma B.3 += +∆t(1 − γ) + Es∼d⋆µ +� +⟨qt +s − Qt +s, π⋆ +s − πt +s⟩ +� +≥ +∆t(1 − γ) − τ(1 − γ), +where the upper bound of +���Es∼d⋆µ [⟨qt +s − Qt +s, π⋆ +s − πt +s⟩] +��� is τ(1 − γ), similar to the derivation of (30), by applying +(27) twice with (dπ +µ, π) = (d⋆ +µ, π⋆) and (dπ +µ, π) = (d⋆ +µ, πt), respectively. +Plugging the two bounds in (31), dividing both side by (1 − γ) and rearranging terms, we obtain +Dt+1 +t +(1 − γ)ηt ++ νt+1 (∆t+1 − ∆t − τ) + ∆t ≤ +D⋆ +t +(1 − γ)ηt +− +D⋆ +t+1 +(1 − γ)ηt ++ τ. +From Proposition 5.2, we have that ∆t+1 − ∆t − τ ≤ 0. Consequencely, since νt+1 ≤ C3 by the definition of C3 +in Assumption (A4), one can lower bound the left hand side of the above inequality by replacing νt+1 by C3, +that is, +Dt+1 +t +(1 − γ)ηt ++ C3 (∆t+1 − ∆t − τ) + ∆t ≤ +D⋆ +t +(1 − γ)ηt +− +D⋆ +t+1 +(1 − γ)ηt ++ τ, +which concludes the proof. + +A Novel Framework for PMD with General Parametrization and Linear Convergence +19 +B.5. Proof of the sublinear convergence analysis +In this section, we derive the sublinear convergence result of Theorem 5.3 with non-dereasing step-size. +Proof. Starting from Proposition B.5 +Dt+1 +t +(1 − γ)ηt ++ C3 (∆t+1 − ∆t) + ∆t ≤ +D⋆ +t +(1 − γ)ηt +− +D⋆ +t+1 +(1 − γ)ηt ++ (1 + C3)τ. +If ηt ≤ ηt+1, +Dt+1 +t +(1 − γ)ηt ++ C3 (∆t+1 − ∆t) + ∆t ≤ +D⋆ +t +(1 − γ)ηt +− +D⋆ +t+1 +(1 − γ)ηt+1 ++ (1 + C3)τ. +Summing up from 0 to T − 1 and dropping positive terms on the left hand side and negative terms on the right +hand side, we have +� +t 0 and a nonnegative sequence {at}t≥0 satisfies +at+1 ≤ αat + b +∀t ≥ 0. +Then for all t ≥ 0, +at ≤ αta0 + +b +1 − α. +The proof of the linear convergence analysis follows by applying this fact with at = ∆t+ +D⋆ +t +(1−γ)ηt(C3−1), α = 1− 1 +C3 +and b = +� +1 + +1 +C3 +� +τ. + +A Novel Framework for PMD with General Parametrization and Linear Convergence +20 +B.7. Discussion on the Distribution Mismatch Coefficients and the Concentrability Coefficients +In our convergence analysis, Assumptions (A3) and (A4) involve concentrability coefficients C1, C2 and the distri- +bution mismatch coefficients C3, which are potentially large. We give extensive discussions on them, respectively. +Distribution mismatch coefficient C3 +As mentioned right after (A4), we have that +max +s∈S +d⋆ +µ(s) +dtµ(s) ≤ +1 +1 − γ max +s∈S +d⋆ +µ +µ := C′ +3, +which is a sufficient upper bound for C3. As discussed in Yuan et al. (2022, Section 5.2), +1/(1 − γ) ≤ C′ +3 ≤ 1/((1 − γ) min +s +µ(s)). +The upper bound 1/((1 − γ) mins µ(s)) of C′ +3 is very pessimistic and the lower bound C′ +3 = 1/(1 − γ) is often +achieved. +Furthermore, if µ does not have full support on the state space, i.e. 
1/((1 − γ) mins µ(s)) might be infinity, one +can always convert the convergence guarantees for some state distribution µ′ ∈ ∆(S) with full support such that +V ⋆(µ) − V T (µ) = +� +s∈S +µ(s) +µ′(s)µ′(s) +� +V ⋆(s) − V T (s) +� +≤ +���� +µ +µ′ +���� +∞ +� +V ⋆(µ′) − V T (µ′) +� +. +Then by the linear convergence result of Theorem 5.3, we only need convergence guarantee on V ⋆(µ′) − V T (µ′) +with an arbitrary distribution µ′ such that C′ +3 is finite. We refer to Yuan et al. (2022, Section 5.2) for more +discussions on the distribution mismatch coefficient. +Concentrability coefficients C1 and C2 +As discussed in Yuan et al. (2022, Section 5.2), the issue of having +(potentially large) concentrability coefficients is unavoidable in all the fast linear convergence analysis of approx- +imate PMD due to the actor and critic errors. In particular, for these coefficients to have finite upper bounds, it +is important that ρt and vt cover well the state and action spaces so that the upper bounds are independent to +t. However, such upper bounds are very pessimistic. Indeed, when πt and πt+1 converge to π⋆, one reasonable +choice of (ρt, vt) is to choose (ρt, vt) ∈ {(d⋆ +µ, π⋆), (dt+1 +µ +, πt+1), (d⋆ +µ, πt), (dt+1 +µ +, πt)} such that C1 and C2 are closed +to 1. We refer to Yuan et al. (2022, Section 5.2) for more discussions on the concentrability coefficients. + +A Novel Framework for PMD with General Parametrization and Linear Convergence +21 +C. Neural network parametrization +In this appendix we give the proof for Theorem 5.5, which is based from the following result by Ji et al. (2019, +Theorem E.1). Let g : Rn → R be given and define the modulus of continuity ωg as +ωg(δ) := sup +s∈Rn{f(s) − f(s′) : max(∥s∥2 , ∥s′∥2) ≤ 1 + δ, ∥s − s′∥2 ≤ δ}. +Theorem C.1 (Ji et al. (2019)). Let g : Rd → R, δ > 0 and ωg(δ) be as above and define for s ∈ Rn +M := +sup +∥s∥≤1+δ +|g(s)|, +g|δ(s) = f(s)1[∥s∥ ≤ 1 + δ], +α := +δ +√ +δ + +� +2 log(2M/ωg(δ)) +. +Let Gα be a gaussian distribution on Rn with mean 0 and variance α2I. Define l = g|δ∗Gα (gaussian convolution) +with Fourier transform �l satisfying radial decomposition �l(w) = |�l(w)| exp(2πiθh(w)). +Let P be a probability +distribution supported on ∥s∥ ≤ 1. Additionally define +c := g(0)g(0) +� +|�l(w)| +� +cos(2π(θl(w) − ∥w∥2)) − 2π ∥w∥2 sin(2π(θl(w) − ∥w∥2)) +� +dw +a = +� +w|�l(w)|dw +r = √n + 2 +� +log +24π2( +√ +d + 7)2 ��g|δ +�� +L1 +ωg(δ) +p := 4π2|�l(w)| cos(2π(∥w∥2 − b))1[|b| ≤ ∥w∥ ≤ r], +and for convenience create fake (weight, bias, sign) triples +(w, b, x)m+1 := (0, c, m sign(c)), +(w, b, x)m+2 := (a, 0, m), +(w, b, x)m+3 := (−a, 0, −m). +Then +|c| ≤ M + 2√n +��g|δ +�� +L1 (2πα2)−d/2, +∥p∥L1 ≤ 2 +��g|δ +�� +L1 +� +(2π)3n +(2πα2)n+1 , +and with probability at least 1 − 3λ over a draw of ((sj, wj, bj))m +j=1 from p (see on top how to sample from signed +densities) +������ +g − 1 +m +m+3 +� +j=1 +xa +j σ(⟨wj, s⟩ + bj) +������ +L2(P ) +≤ 3ωg(δ) + r ∥p∥L1 +√m +� +1 + +� +2 log(1/λ) +� +. +Theorem 5.5 is then obtained by setting g = �Qt −η−1 +t +∇h(πt) and using a union bound over A and (0, . . . , T −1). 
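To make the neural parametrization of Theorem 5.5 concrete, the per-action shallow ReLU form f(s, a) = (1/m) Σ_j x_j σ(⟨w_j, s⟩ + b_j) can be sketched directly. The code below is an illustration of the functional form only: the weights are generic random placeholders, whereas in Theorem C.1 the pairs (w, b) would be drawn from |p_a|/‖p_a‖₁ with signs x_j = sign(p_a(w_j, b_j)) and three extra fixed triples appended; that construction (Gaussian smoothing and the Fourier-based density p) is not reproduced here.

```python
import numpy as np

def shallow_relu(s, W, b, x):
    """f(s) = (1/m) * sum_j x_j * relu(<w_j, s> + b_j), one action's network."""
    m = len(x)
    return np.maximum(W @ s + b, 0.0) @ x / m

rng = np.random.default_rng(1)
n, m, n_actions = 5, 64, 3
# Placeholder parameters standing in for samples from the signed densities p_a.
params = [(rng.normal(size=(m, n)), rng.normal(size=m), rng.choice([-1.0, 1.0], size=m))
          for _ in range(n_actions)]

s = rng.normal(size=n)
s /= np.linalg.norm(s)                      # the analysis assumes ||s||_2 <= 1
f_values = np.array([shallow_relu(s, W, b, x) for (W, b, x) in params])
print(f_values)                             # one scalar per action
```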
+ diff --git a/RdFPT4oBgHgl3EQfpzV2/content/tmp_files/load_file.txt b/RdFPT4oBgHgl3EQfpzV2/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..b0ec5fac12ccbbb0f9086527766eba93aa4c374b --- /dev/null +++ b/RdFPT4oBgHgl3EQfpzV2/content/tmp_files/load_file.txt @@ -0,0 +1,912 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf,len=911 +page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content='13139v1 [stat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content='ML] 30 Jan 2023 A Novel Framework for Policy Mirror Descent with General Parametrization and Linear Convergence Carlo Alfano 1 Rui Yuan 2 Patrick Rebeschini 1 Abstract Modern policy optimization methods in ap- plied reinforcement learning are often in- spired by the trust region policy optimiza- tion algorithm, which can be interpreted as a particular instance of policy mirror descent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' While theoretical guarantees have been es- tablished for this framework, particularly in the tabular setting, the use of a general parametrization scheme remains mostly un- justified.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' In this work, we introduce a novel framework for policy optimization based on mirror descent that naturally accommodates general parametrizations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' The policy class in- duced by our scheme recovers known classes, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' tabular softmax, log-linear, and neural policies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' It also generates new ones, depend- ing on the choice of the mirror map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' For a general mirror map and parametrization function, we establish the quasi-monotonicity of the updates in value function, global linear convergence rates, and we bound the total variation of the algorithm along its path.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' To showcase the ability of our framework to ac- commodate general parametrization schemes, we present a case study involving shallow neu- ral networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' Introduction Policy optimization represents one of the most widely- used classes of algorithms for reinforcement learning (RL).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' Among policy optimization techniques, policy gradient (PG) methods are gradient-based algorithms that optimize the policy over a parametrized policy class and have emerged as a popular class of algo- rithms for RL (Williams and Peng, 1991;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' Sutton et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=', 1Department of Statistics, University of Oxford, United Kingdom 2LTCI, T´el´ecom Paris and Institut Polytechnique de Paris, France.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/RdFPT4oBgHgl3EQfpzV2/content/2301.13139v1.pdf'} +page_content=' Correspondence to: Carlo Alfano 0 +Thus, +d +dxh(x) < 0 when, +cos 1 +x sin 2 +x − 2 sin 1 +x cos 2 +x > 0 +We can continue as follows, using the double angle identities: +cos 1 +x sin 2 +x − 2 sin 1 +x cos 2 +x = cos 1 +x +� +2 sin 1 +x cos 1 +x +� +− 2 sin 1 +x +� +cos2 1 +x − 1 +� += 2 sin 1 +x cos2 1 +x − 2 sin 1 +x cos2 1 +x + 2 sin 1 +x += 2 sin 1 +x +> 0 +This is trivially true for x ≥ 2 +π . Hence, +d +dxh(x) < 0. As a result ρℓ(0) monotonically +decreases when ℓ increases. +□ +Corollary 5.1 By lemma 3.1, ρ0(0) = 1 +2, and by lemma 4 limℓ→∞ ρℓ(0) = 1 +4. Thus, +since ρℓ(0) is monotonically decreasing with respect to ℓ (lemma 5), it is clear that +ρℓ(0) ∈ +� 1 +4, 1 +2 +� + +12 +Lemma 6 Similarly, we have ρℓ(2ℓ − 1) ∈ +� 1 +2, 3 +4 +� +. +Proof Let us begin by showing: +ρℓ(2ℓ − 1) = 1 − ρℓ(0) +Plugging in 2ℓ − 1, +ρℓ(2ℓ − 1) = tℓ(2ℓ − 1) − tℓ(2ℓ − 2) +tℓ(2ℓ) − tℓ(2ℓ − 2) += (1 − tℓ(1)) − (1 − tℓ(2)) +1 − (1 − tℓ(2)) +(Lemma 3, Property 2) += tℓ(2) − tℓ(1) +tℓ(2) += 1 − tℓ(1) +tℓ(2) += 1 − ρℓ(0) +As was shown in corollary 5.1, ρℓ(0) ∈ +� 1 +4, 1 +2 +� +, thus ρℓ(2ℓ − 1) ∈ +� 1 +2, 3 +4 +� +. +□ +So we have figured out the bounds of ρℓ for the extreme values of its domain. +The next step will be to show that ρℓ(k) < ρℓ(k + 1) for all valid k. This will +give us a chain of inequalities, which will bound all ρ. +Lemma 7 The following statement relates an inequality of weights to an inequality +of previous-to-new-left-interval-ratios: +wℓ(2k) +wℓ(2k + 1) < wℓ(2k + 2) +wℓ(2k + 3) ⇐⇒ ρℓ−1(k) < ρℓ−1(k + 1) +Proof Applying some basic algebra, +wℓ(2k) +wℓ(2k + 1) < wℓ(2k + 2) +wℓ(2k + 3) ⇐⇒ wℓ(2k)wℓ(2k + 3) < wℓ(2k + 2)wℓ(2k + 1) +Adding wℓ(2k)wℓ(2k + 2) (> 0 by remark 4) to both sides results in, +wℓ(2k)wℓ(2k + 2) + wℓ(2k)wℓ(2k + 3) < wℓ(2k)wℓ(2k + 2) + wℓ(2k + 2)wℓ(2k + 1) +Which can be factored into, +wℓ(2k)(wℓ(2k + 2) + wℓ(2k + 3)) < wℓ(2k + 2)(wℓ(2k) + wℓ(2k + 1)) +Rearranging gives, +wℓ(2k) +wℓ(2k) + wℓ(2k + 1) < +wℓ(2k + 2) +wℓ(2k + 2) + wℓ(2k + 3) +(2) +Finally, by definition of previous-to-new-left-interval-ratio ρ, +ρℓ(k) < ρℓ(k + 1) +□ + +13 +This relation will be helpful in showing ρℓ−1(k) < ρℓ−1(k + 1), as wℓ is an +easier function to manage. +Definition 9 Fix ℓ, let us define the following real number extensions of tℓ, T(x) +and wℓ, W(x). Where the domains and codomains of T(x) and W(x) are R. +T(x) = sin2 �π +2 · x +2ℓ +� +and, +W(x) = T(x + 1) − T(x) +where x ∈ [0, 2ℓ − 1] is real. +Corollary 7.1 Fix ℓ. If k ∈ N, 0 ≤ k ≤ 2ℓ, then T(k) = tℓ(k), and W(k) = wℓ(k). 
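As a numerical sanity check of the partition machinery above — an added illustration, not part of the original argument — the points tℓ(k) = sin²(π k / 2^{ℓ+1}) and the previous-to-new-left-interval ratios can be tabulated directly. The sketch below assumes the indexing convention in which ρℓ(k) is the fraction of [tℓ(k), tℓ(k+1)] occupied by its left child after one refinement; it confirms the 1/4 and 3/4 bounds, the monotonic increase in k, and the symmetry ρℓ(2^ℓ − 1) = 1 − ρℓ(0) discussed in this subsection.

```python
import numpy as np

def t(level, k):
    """Partition points t_l(k) = sin^2((pi/2) * k / 2^l)."""
    return np.sin(0.5 * np.pi * k / 2.0 ** level) ** 2

def rho(level, k):
    """Left-child fraction of [t_l(k), t_l(k+1)] after one refinement:
    (t_{l+1}(2k+1) - t_{l+1}(2k)) / (t_{l+1}(2k+2) - t_{l+1}(2k))."""
    left = t(level + 1, 2 * k + 1) - t(level + 1, 2 * k)
    whole = t(level + 1, 2 * k + 2) - t(level + 1, 2 * k)
    return left / whole

for level in range(1, 8):
    r = np.array([rho(level, k) for k in range(2 ** level)])
    assert np.all(r > 0.25) and np.all(r < 0.75)   # the (1/4, 3/4) bounds
    assert np.all(np.diff(r) > 0)                  # ratios increase with k
    assert np.isclose(r[0] + r[-1], 1.0)           # rho_l(2^l - 1) = 1 - rho_l(0)
```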
+With wℓ and tℓ extended to the real numbers, it is possible to apply tools +from calculus, making the next few proofs substantially easier. +Remark 4 Clearly, T(x) increases as x increases. As a result W(x) > 0 for x ∈ +[0, 2ℓ − 1]. +Remark 5 With the new function W, it is possible to extend the expression seen in +lemma 7, +W(x) +W(x + 1) < W(x + 2) +W(x + 3), +x ∈ +� +0, 2ℓ − 1 +� +With corollary 7.1 in mind, it is easy to see that if this relation holds in this extended +context, then it will also hold for the relation with wℓ. +Remark 6 The relation can be simplified further by considering, +W(x) +W(x + 1) < W(x + 1) +W(x + 2) +Assuming this holds for all x, it is very obvious that, +W(x) +W(x + 1) < W(x + 1) +W(x + 2) ⇒ +W(x) +W(x + 1) < W(x + 2) +W(x + 3) +So proving +W (x) +W (x+1) < W (x+1) +W (x+2), will be sufficient to show ρℓ(k) < ρℓ(k + 1). +Lemma 8 Fix ℓ. The derivative of W(x) is greater than the derivative of W(x+1): +d +dxW(x) > d +dxW(x + 1) +x ∈ +� +0, 2ℓ − 1 +� +Proof We first find +d +dxW(x + r), for an arbitrary r ∈ R. +d +dxT(x + r) = d +dx sin2 �π +2 +�x + r +2ℓ +�� + +14 += d +dx sin2 �α +2 (x + r) +� +� +α = π +2L +� += α sin(α(x + r)) +Thus, we have +d +dxW(x + r): +d +dxW(x + r) = d +dxT(x + r + 1) − d +dxT(x + r) += α +� +sin(α(x + r + 1)) − sin(α(x + r)) +� +Finally, we show: +d +dxW(x) > d +dxW(x + 1) ⇐⇒ α(sin(α(x + 1)) − sin(αx))) +> α(sin(α(x + 2)) − sin(α(x + 1))) +⇐⇒ 2 sin(α(x + 1)) − sin(αx) − sin(α(x + 2)) > 0 +Let u = αx, v = α then, +2 sin(u + v) − sin(u) − sin(u + 2v) = 2(sin u cos v + cos u sin v) − sin u +− (sin u cos 2v + cos u sin 2v) += 2 sin u cos v + 2 cos u sin v − sin u +− sin u(2 cos2 v − 1) − cos u(2 sin v cos v) += 2 sin u cos v + 2 cos u sin v − sin u +− 2 sin u cos2 v + sin u − 2 sin v cos2 v += 2 sin u cos v(1 − cos v) + 2 cos u sin v(1 − cos v) += 2(sin u cos v + cos u sin v)(1 − cos v) += 2 sin(u + v)(1 − cos v) += 2 sin(αx + α)(1 − cos α) += 2 sin +� +π · x + 1 +2ℓ +� � +1 − cos π +2ℓ +� +It is easily shown that 1 − cos π +2ℓ > 0 for ℓ ≥ 0, thus, the result holds when: +sin +� +π x + 1 +2ℓ +� +> 0 +This is equivalent to when 0 < x+1 +2ℓ +< 1. Hence, +d +dxW(x) > d +dxW(x + 1), +if x ∈ [0, 2L − 1] +□ +Lemma 9 In the real domain, the ratio of the previous width to the current width +increases monotonically, that is, +W(x) +W(x + 1) < W(x + 1) +W(x + 2), +x = +� +0, 2ℓ − 1 +� +. + +15 +Proof By lemma 8, the derivative of current width W ′(x + 1) is lower than the +derivative of the previous width W ′(x). This implies that, +� x+2 +x+1 +W ′(z)dz < +� x+1 +x +W ′(z)dz. +Let ∆W(x) be equal to +� x+1 +x +W ′(z)dz. Since W(x) > 0 (see Remark 4), multiplying +both sides by W(x), it is apparent that W(x)∆W(x + 1) is less than W(x)∆W(x). +Adding W(x)2 + W(x)∆W(x) to both sides and ∆W(x)2 (which is > 0) to the +right hand side shows that W(x)2 + W(x)∆W(x) + W(x)∆W(x + 1) is less than +W(x)2 + 2W(x)∆W(x) + ∆W(x)2. Factoring gives, +W(x) [W(x) + ∆W(x) + ∆W(x + 1)] < (W(x) + ∆W(x))2 +Note that W(x) + ∆W(x) is equal to +� x +0 W ′(z)dz + +� x+1 +x +W ′(z)dz which is equal to +� x+1 +0 +W ′(z)dz. By a similar argument it can shown that W(x)+∆W(x)+∆W(x+1) +is equal to +� x+2 +0 +W ′(z)dz. Hence, it follows that, +W(x) +� x+2 +0 +W ′(z)dz < +�� x+1 +0 +W ′(z)dz +�2 +. +By the fundamental theorem of calculus, W(x)W(x + 2) is less than W(x + 1)2. +Rearranging yields, +W(x) +W(x + 1) < W(x + 1) +W(x + 2). +□ +Corollary 9.1 The following relation holds, ρℓ(k) < ρℓ(k + 1) for all k ∈ [0, 2ℓ − 2]. +Proof This is a direct result of Lemmas 7 and 9. 
+□ +Theorem 10 For all ℓ, k ∈ N, 0 ≤ k ≤ 2ℓ − 1, ρℓ(k) is bounded with, +ρℓ(k) ∈ +�1 +4 + ϵ, 3 +4 − ϵ +� +, +ϵ > 0 +Proof By Corollary 5.1 we have ρℓ(0) ∈ +� 1 +4 + ϵ, 1 +2 +� +, thus ρℓ(0) > 1 +4. By Lemma 6 +we have ρℓ(2ℓ − 1) ∈ +� 1 +2, 3 +4 − ϵ +� +, so ρℓ(2ℓ − 1) < 3 +4. Finally, by Corollary 9.1, we +have ρℓ(k) < ρℓ(k + 1) for the current range of k = 0, . . . , 2ℓ − 1. This results in the +following chain of inequalities: +1 +4 < ρℓ(0) < ρℓ(1) < · · · < ρℓ +� +2ℓ − 1 +� +< 3 +4 +□ + +16 +3.2.2 Area Estimation +Various important properties about the partitioning scheme Pℓ have been the +primary focus. This section focuses on estimating the area under bn,m given a +partition Pℓ, using concepts from Darboux integrals. The end goal is to show +that error can become arbitrarily small given a large enough ℓ. This section +also lays the foundations for a better, and more realistic upper bound for error +in estimating Shapley values, which is covered in a later section. +Definition 10 The supremum of a set S ⊆ R is, +sup(S) = min +x∈R x ≥ s, +∀s ∈ S. +The infimum of S is, +inf(S) = max +x∈R x ≤ s, +∀s ∈ S. +Definition 11 Darboux sums takes a partition P = (z0, z1, . . . , zn) of an interval +[a, b], where a = z0 < z1 < · · · < zn = b, and a function f which maps (a, b) to R. +Each interval [zi, zi+1] is called a subinterval. Let +Mi = +sup +x∈[zi,zi+1] +f(x), +and +mi = +inf +x∈[zi,zi+1] f(x), +i = 0, . . . , n − 1 +The upper and lower bounds of a sub interval’s area are, +AU(f, [zi, zi+1]) = (zi+1 − zi)Mi, +and +AL(f, [zi, zi+1]) = (zi+1 − zi)mi +respectively. The upper Darboux sum is: +U(f, P) = +n−1 +� +i=0 +AU(f, [zi, zi+1]), +and the lower Darboux sum is: +L(f, P) = +n−1 +� +i=0 +AL(f, [zi, zi+1]), +There is a geometric interpretation of the Darboux sums. Each subinterval +has a rectangle width corresponding to the subinterval witch, and a height +corresponding to either the supremum or infimum of f(x). The upper Darboux +sum is the sum of the areas of these rectangles, where their heights correspond +to their suprema. +Remark 7 Suppose instead of taking Mi or mi, we took arbitrary elements from each +subinterval to represent the heights of the rectangles: +n−1 +� +i=0 +(zi+1 + zi)f(xi) +xi ∈ [zi, zi+1]. +(3) +By definitions of supremum and infimum (Definition 10), it is clear that for all +i = 0, . . . , n − 1, mi ≤ xi ≤ Mi. It follows that for all i, the areas AL(f, [zi, zi+1]) + +17 +are less than or equal to the areas (zi+1 − zi)f(xi) which are less than or equal to +the areas AU(f, [zi, zi+1]). This gives, +L(f, P) ≤ +n−1 +� +i=0 +(zi+1 + zi)f(xi) ≤ U(f, P). +As a result, there is a lot of freedom in choosing which part of the subinterval to +assess f(x) +Lemma 11 For any subinterval [zi, zi+1], and function f that maps elements of +[zi, zi+1] to R, +AL(f, [zi, zi+1]) ≤ +zi+1 +� +zi +f(x)dx ≤ AU(f, [zi, zi+1]) +Proof Consider AL(f, [zi, zi+1]) which is equal to (zi+1 − zi)mi where mi is equal +to infx∈[zi,zi+1] f(x). Note that, +(zi+1 − zi)mi = +zi+1 +� +zi +midx. +By Definition 10, for all x ∈ [zi, zi+1], f(x) greater or equal to mi. Thus, +zi+1 +� +zi +f(x)dx ≥ +zi+1 +� +zi +midx = AL(f, [zi, zi+1]). +A nearly identical argument can be used to show +� zi+1 +zi +f(x)dx ≤ AU(f, [zi, zi+1]). +□ +The next lemma bounds the error resulting from estimating the area under +a subinterval by picking a random point on that subinterval and making a +rectangle. 
+Lemma 12 Take an arbitrary point y in the subinterval [zi, zi+1], then, +������ +zi+1 +� +zi +f(x)dx − (zi+1 − zi)f(y) +������ +≤ AU(f, [zi, zi+1]) − AL(f, [zi, zi+1]) +Proof Recall that by Definition 11 AL(f, [zi, zi+1]) is equal to (zi+1 − zi)mi, and +AU(f, [zi, zi+1]) is equal to (zi+1 − zi)Mi, where, +mi = +inf +x∈[zi,zi+1] f(x), +and +Mi = +sup +x∈[zi,zi+1] +f(x). + +18 +By +Definition +10, +mi +≤ +f(y) +≤ +Mi, +which +implies +AL(f, [zi, zi+1]) +≤ +(zi+1 − zi)f(y) ≤ AU(f, [zi, zi+1]). Let us assume (zi+1 − zi)f(y) be less than +or equal to +� zi+1 +zi +f(x)dx, meaning +����� +zi+1 +� +zi +f(x)dx − (zi+1 − zi)f(y) +����� is equal to +zi+1 +� +zi +f(x)dx +− +(zi+1 − zi)f(y). Then, since Lemma 11 shows +� zi+1 +zi +f(x)dx ≤ +AU(f, [zi, zi+1]), it follows that +zi+1 +� +zi +f(x)dx − (zi+1 − zi)f(y) ≤ AU(f, [zi, zi+1]) − (zi+1 − zi)f(y). +As shown above, it is also the case that (zi+1 − zi)f(y) is greater than or equal to +AL(f, [zi, zi+1]). As a result, +AU(f, [zi, zi+1]) − (zi+1 − zi)f(y) ≤ AU(f, [zi, zi+1]) − AL(f, [zi, zi+1]). +With similar argumentation, we can show this also holds for (zi+1−zi)f(y) is greater +than or equal to +� zi+1 +zi +f(x)dx. +□ +Remark 8 Consider the following approach to approximating area under f on the +interval [zi, zi+1]: choose an arbitrary x ∈ [zi, zi+1], multiply by width of interval. +Clearly, in this context, the error is equal to +���(zi+1 − zi)f(x) − +� zi+1 +zi +f(x)dx +���. By +Lemma 12, it follows that AU(f, [zi, zi+1])−AL(f, [zi, zi+1]) can be used as an upper +bound for error when using this this approach. +Corollary 12.1 Given a partition Pℓ = +� +tℓ(0), . . . , tℓ +� +2ℓ�� +, the upper bound of +error when approximating area under bn,m(x) over the kth subinterval is defined as: +UEn,m(ℓ, k) = (tℓ(k + 1) − tℓ(k)) +� +U(bn,m, [tℓ(k), tℓ(k+1)])−L(bn,m, [tℓ(k), tℓ(k+1)]) +� +Remark 9 Note that bn,m(x) has at most one local maximum for x ∈ [0, 1], +for all valid n, m. As a result, given a partition P of [0, 1], on every subinter- +val of P (except when the subinterval contains the local maximum, which occurs +in only one subinterval), bn,m(x) is either monotonically increasing or decreasing. +For each subinterval [zi, zi+1] of P on which bn,m(x) is monotonically increasing, +L(bn,m, [zi, zi+1]) is equal to bn,m(zi) and U(bn,m, [zi, zi+1]) is equal to bn,m(zi+1). +When bn,m(x) is decreasing over [zi, zi+1], L(bn,m, [zi, zi+1]) is equal to bn,m(zi+1) +and U(bn,m, [zi, zi+1]) is equal to bn,m(zi). +Corollary +12.2 Given a partition Pℓ += +� +tℓ(0), . . . , tℓ +� +2ℓ�� +, the following +monotonic-assumption upper bound of error when approximating the area under +bn,m(x) over the kth subinterval is defined as: +UEn,m(ℓ, k) = (tℓ(k + 1) − tℓ(k)) +��bn,m(tℓ(k + 1)) − bn,m(tℓ(k)) +�� +This definition makes easy finding an error upper bound for all intervals +except one, the one containing the local maximum. + +19 +Fig. 6 Example visualization of UEn,e’s change during refinement for arbitrary function. +Corollary 12.3 If bn,m(x) is monotonic over [tℓ(k), tℓ(k + 1)], then UEn,m(ℓ, k) is +equal to UEn,m(ℓ, k). +Lemma 13 Let us suppose a partition Pℓ−1 is refined to partition Pℓ. 
Given a +subinterval [tℓ−1(k), tℓ−1(k + 1)] of Pℓ, the monotonic-assumption upper bound of +error is reduced over that interval by a factor of at least 3/4, that is: +3 · UEn,m(ℓ − 1, k) +4 +> +� +UEn,m(ℓ, 2k) + UEn,m(ℓ, 2k + 1) +� +Proof Let us consider Figure 6, there are the following widths, +x = tℓ−1(k + 1) − tℓ−1(k), +y = |bn,m(tℓ−1(k + 1)) − bn,m(tℓ−1(k))| , +where x and y represent the width and change in height of the previous subinterval. +Also there are the following widths, +x1 = tℓ(2k + 1) − tℓ(2k), +y1 = |bn,m(tℓ(2k + 1)) − bn,m(tℓ(2k))| , +where x1 and y1 represent the width and change in height of the left part of split +previous subinterval. It follows that the width and change in height of the right part +of the split subinterval have values, +x2 = x − x1, +y2 = y − y1. +It is concluded that, +UEn,m(ℓ − 1, k) = x · y, +UEn,m(ℓ, 2k) = x1 · y1, and +UEn,m(ℓ, 2k + 1) = x2 · y2. +Finally, let us define x = x1 +x , and y = y1 +y . Note that when considering UEn,m(ℓ−1, k), +x = x1 +x = tℓ(2k + 1) − tℓ(2k) +tℓ(2k + 2) − tℓ(2k) = ρℓ−1(k), + +a +y2 +9 +Y1 +1 +220 +by Remark 3. Thus by Theorem 10, 1 +4 < x < 3 +4. Simultaneously, 0 ≤ y ≤ 1. We +proceed by simplifying the following expression +UEn,m(ℓ, 2k) + UEn,m(ℓ, 2k + 1) +UEn,m(ℓ − 1, k) += x1y1 + x2y2 +xy +Plugging in the definition for x2 and y2, the above is equivalent to, +x1y1 + (x − x1)(y − y1) +xy +Doing the product and applying the definitions for x and y yields +2x y − y − x + 1. +Rearranging, it can be shown that the above is equal to +2 +� +x − 1 +2 +� � +y − 1 +2 +� ++ 1 +2. +Let us assume that (y − 1/2) is positive without loss of generality. Then assigning +to x and y their respective maximum values, the following inequality is obtained, +UEn,m(ℓ, 2k) + UEn,m(ℓ, 2k + 1) +UEn,m(ℓ − 1, k) +< 2 +�3 +4 − 1 +2 +� � +1 − 1 +2 +� ++ 1 +2 = 3 +4 +A similar argument can be used for when (y − 1/2) is negative, and the above is +trivially correct for (y − 1/2) equals zero. +□ +Corollary 13.1 For all monotonic sub-intervals [tℓ(k), tℓ(k + 1)], the upper bound +of error UEn,m(ℓ, k) is reduced by at least 25% when the partition is refined from Pℓ +to Pℓ+1. +Proof This follows as a direct result of Lemma 13. +□ +Next, let us consider error over the whole of the approximation. +Definition 12 We denote the sum of upper bounds for error over all sub-intervals +as: +SUEn,m(ℓ) = +2ℓ−1 +� +k=0 +UEn,m(ℓ, k), +and the sum of upper bounds for error over all sub-intervals with the monotonic +assumption for all sub-intervals as: +SUEn,m(ℓ) = +2ℓ−1 +� +k=0 +UEn,m(ℓ, k). +To discuss how error evolves with respect to granularity of our partition, +having an upper bound for initial error is critical. +Definition 13 We denote initial error as, +σn,m = SUEn,m(0) + +21 +Remark 10 Using the first and second derivative tests, one can verify that bn,m(m/n) +is the supremum of bn,m(x) for x ∈ [0, 1]. One can also easily show the infimum of +bn,m(x) is 0. Thus, +σn,m = SUEn,m(0) = UEn,m(0, 0) = bn,m +�m +n +� +Lemma 14 +SUEn,m(ℓ + 1) ≤ 3 +4SUEn,m(ℓ) +Proof By Definition 12, +SUEn,m(ℓ + 1) = +2ℓ+1−1 +� +k=0 +UEn,m(ℓ + 1, k). +Rearranging the values in the sum yields, +SUEn,m(ℓ + 1) = +2ℓ−1 +� +k=0 +� +UEn,m(ℓ + 1, 2k) + UEn,m(ℓ + 1, 2k + 1) +� +. +Hence by Lemma 13, +SUEn,m(ℓ + 1) < +2ℓ−1 +� +k=0 +�3 +4UEn,m(ℓ, k) +� += 3 +4SUEn,m(ℓ). +□ +References +[1] Goodman, B., Flaxman, S.: European Union regulations on algorithmic +decision-making and a “right to explanation”. 
AI magazine 38(3), 50–57 +(2017) +[2] Rudin, C.: Stop explaining black box machine learning models for high +stakes decisions and use interpretable models instead. Nature Machine +Intelligence 1(5), 206–215 (2019) +[3] Lundberg, S.M., Lee, S.-I.: A unified approach to interpreting model +predictions. Advances in neural information processing systems 30 (2017) +[4] Matsui, Y., Matsui, T.: NP-completeness for calculating power indices of +weighted majority games. Theoretical Computer Science 263(1-2), 305– +310 (2001) +[5] Prasad, K., Kelly, J.S.: NP-completeness of some problems concerning +voting games. International Journal of Game Theory 19(1), 1–9 (1990) +[6] Castro, J., G´omez, D., Tejada, J.: Polynomial calculation of the Shapley +value based on sampling. Computers & Operations Research 36(5), 1726– +1730 (2009) + +22 +[7] Biamonte, J., Wittek, P., Pancotti, N., Rebentrost, P., Wiebe, N., Lloyd, +S.: Quantum machine learning. Nature 549(7671), 195–202 (2017) +[8] Chen, S.Y.-C., Yang, C.-H.H., Qi, J., Chen, P.-Y., Ma, X., Goan, H.- +S.: Variational quantum circuits for deep reinforcement learning. IEEE +Access 8, 141007–141024 (2020) +[9] Aumann, R.J.: Some non-superadditive games, and their Shapley values, +in the Talmud. International Journal of Game Theory 39 (2010) +[10] Winter, E.: The Shapley value. Handbook of game theory with economic +applications 3, 2025–2054 (2002) +[11] Hart, S.: Shapley value. In: Game Theory, pp. 210–216. Springer, (1989) +[12] Shapley, L.S.: A Value for N-Person Games. RAND Corporation, Santa +Monica, CA (1952). https://doi.org/10.7249/P0295 +[13] Lipovetsky, S., Conklin, M.: Analysis of regression in game theory +approach. Applied Stochastic Models in Business and Industry 17(4), +319–330 (2001) +[14] Van den Broeck, G., Lykov, A., Schleich, M., Suciu, D.: On the tractability +of SHAP explanations. Journal of Artificial Intelligence Research 74, 851– +886 (2022) +[15] Bertossi, L., Li, J., Schleich, M., Suciu, D., Vagena, Z.: Causality-based +explanation of classification outcomes. In: Proceedings of the Fourth +International Workshop on Data Management for End-to-End Machine +Learning, pp. 1–10 (2020) +[16] Kulesza, T., Burnett, M., Wong, W.-K., Stumpf, S.: Principles of explana- +tory debugging to personalize interactive machine learning. In: Proceed- +ings of the 20th International Conference on Intelligent User Interfaces, +pp. 126–137 (2015) +[17] Goebel, R., Chander, A., Holzinger, K., Lecue, F., Akata, Z., Stumpf, +S., Kieseberg, P., Holzinger, A.: Explainable AI: the new 42? In: Inter- +national Cross-domain Conference for Machine Learning and Knowledge +Extraction, pp. 295–303 (2018). Springer +[18] Hoffman, R.R., Mueller, S.T., Klein, G., Litman, J.: Metrics for explain- +able AI: Challenges and prospects. arXiv preprint arXiv:1812.04608 +(2018) + diff --git a/S9E3T4oBgHgl3EQfzQuQ/content/tmp_files/load_file.txt b/S9E3T4oBgHgl3EQfzQuQ/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..62b25ac6d1334463dbdb14d9854730c2bdbee505 --- /dev/null +++ b/S9E3T4oBgHgl3EQfzQuQ/content/tmp_files/load_file.txt @@ -0,0 +1,578 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf,len=577 +page_content='A Quantum Algorithm for Shapley Value Estimation Iain Burge1*, Michel Barbeau1 and Joaquin Garcia-Alfaro2,1 1*School of Computer Science, Carleton University, Colonel By Dr.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=', Ottawa, K1S 5B6, Ontario, Canada.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' 2Samovar, T´el´ecom SudParis, Institut Polytechnique de Paris, Palaiseau, 91120, France.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Corresponding author(s).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' E-mail(s): IainBurge@cmail.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='carleton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='ca;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Contributing authors: barbeau@scs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='carleton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='ca;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' joaquin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='garcia alfaro@telecom-sudparis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='eu;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Abstract The introduction of the European Union’s (EU) set of comprehensive reg- ulations relating to technology, the General Data Protection Regulation, grants EU citizens the right to explanations for automated decisions that have significant effects on their life.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' This poses a substantial challenge, as many of today’s state-of-the-art algorithms are generally unexplain- able black boxes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Simultaneously, we have seen an emergence of the fields of quantum computation and quantum AI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Due to the fickle nature of quantum information, the problem of explainability is amplified, as mea- suring a quantum system destroys the information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' As a result, there is a need for post-hoc explanations for quantum AI algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' In the clas- sical context, the cooperative game theory concept of the Shapley value has been adapted for post-hoc explanations.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' However, this approach does not translate to use in quantum computing trivially and can be expo- nentially difficult to implement if not handled with care.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' We propose a novel algorithm which reduces the problem of accurately estimating the Shapley values of a quantum algorithm into a far simpler problem of estimating the true average of a binomial distribution in polynomial time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Keywords: Quantum Computing, Cooperative Game Theory, Explainable AI, Quantum AI 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='04727v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='QA] 11 Jan 2023 2 1 Introduction 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='1 Background With the introduction of the European Union’s set of comprehensive regula- tions relating to technology, the General Data Protection Regulation (GDPR), there has been a massive shift in the world of AI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Specifically in our case, the GDPR has provided EU citizens a right to explanation [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' This poses a sub- stantial challenge, as many of today’s state of the art algorithms, such as Deep learning models, are generally black boxes [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Meaning that even the develop- ers of AI models usually have no way of actually understanding the decisions of their models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' There are two paths one can take to rectify the new need for model explanations, either by making models inherently interpretable, or by coming up with post-hoc explanations for our black-box models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' One more recent axiomatic strategy for post-hoc explainability is based on the game theory concept of the Shapley value which is a powerful measure of contribu- tion [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' However, direct calculation of Shapley values is an NP-hard problem [4, 5], and outside of specific problems types, sampling is the only option for approximating Shapley values [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' On the other hand we have the emergence or quantum algorithms and quantum machine learning (QML) [7].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Quantum computers are at least naively resistant to explanation, as the even measuring the internal state destroys most of the information within it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Combining this with techniques like deep rein- forcement learning with variational quantum circuits [8] makes interpretability seem impossible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='2 Problem Statement Inherently interpretable models would likely be best [2], as an explanation of an interpretable model is guaranteed to be correct.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' However, much of the research and work in AI over the past couple decades have been into black box models, and many of the benefits of QML may not be possible to implemented in an interpretable fashion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Ideally, we do not want to throw away all of the previous black box research, so there is value in implementing and improving post-hoc explanation methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Current solutions to post-hoc explanations would unintuitive, or unwieldy to apply in the context of quantum computers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' We explore a native quantum solution to post-hoc explainability using Shapley value approximation, where the function itself is approximated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='3 Results We develop a flexible framework for global evaluation of input factors in quantum circuits which approximates the Shapley values of such factors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Our framework increases circuit complexity by an additional roughly O(nlogn) c- not gates, with a total increase in circuit depth of O(n), where n is the number 3 of factors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' The change in space complexity for global evaluations is an addi- tional O(logn) qubits over the circuit being evaluated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' This is in stark contrast to the O(2n) assessments needed to directly assess the Shapley values under the general case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' 2 Background 2.' 
2.1 Shapley Values

2.1.1 Cooperative Game Theory

Cooperative game theory is the study of coalitional games.

Definition 1 A coalitional game can be described as the tuple $G = (F, V)$, wherein $F = \{1, 2, \ldots, N\}$ is a set of $N$ players. $V$ is a value function with $V(S) \in \mathbb{R}$ representing the value of a given coalition $S \subseteq F$, with the restriction that $V(\emptyset) = 0$.

Definition 2 Given a game $G = (F, V)$, $F = \{1, 2, \ldots, N\}$, a payoff vector $\Phi(G)$ is a vector of length $N$ which describes the utility $\Phi(G)_i$ of player $i$. A payoff vector is determined by the value function, where player $i$'s payoff value $\Phi(G)_i$ is determined by how $V(S)$, $S \subseteq F$, is affected by $i$'s inclusion in or exclusion from $S$ for any possible $S$.

There are various solution concepts that construct these payoff vectors (or sets of payoff vectors) [9]. In this paper, we are most interested in Shapley values.

2.1.2 Shapley Values

In the early 50s, Shapley introduced a new solution concept to determine resource allocation in cooperative games, which we now denote the Shapley value.
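As a concrete (purely illustrative) way to read Definition 1, a coalitional game can be stored as a player set together with a value function over coalitions. The sketch below is not from the paper; it assumes Python and uses a hypothetical value function chosen only for demonstration.

```python
from itertools import combinations

# Hypothetical coalitional game G = (F, V) used only for illustration:
# F is the player set and V maps each coalition (a frozenset) to a real value,
# with the empty coalition worth 0, as Definition 1 requires.
F = [1, 2, 3]

def V(S: frozenset) -> float:
    return float(len(S) ** 2)   # illustrative choice: a coalition is worth |S|^2

# Enumerate every coalition S ⊆ F and its value.
for r in range(len(F) + 1):
    for S in combinations(F, r):
        print(set(S) or "{}", "->", V(frozenset(S)))
```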
It was unique in that it returned a single unique payoff vector, which was thought to be potentially untenable at the time [10]. The Shapley value can be derived from one of several sets of axioms; in our case we use the following four. Suppose we have games $G = (F, V)$ and $G' = (F, V')$, $F = \{1, 2, \ldots, N\}$, and a payoff vector $\Phi(G)$. Then:

1. Efficiency: The sum of all utility is equal to the utility of the grand coalition:
$$\sum_{i=1}^{N} \Phi(G)_i = V(F)$$

2. Equal Treatment: Players $i, j$ are said to be symmetric if $\forall S \subseteq F,\ i, j \notin S\colon V(S \cup \{i\}) = V(S \cup \{j\})$. If $i$ and $j$ are symmetric in $G$, then they are treated equally: $\Phi(G)_i = \Phi(G)_j$.

3. Null Player: If player $i$ satisfies $\forall S \subseteq F,\ i \notin S\colon V(S) = V(S \cup \{i\})$, then $i$ is a null player. If $i$ is a null player, then $\Phi(G)_i = 0$.

4. Additivity: If a player is in two games, the Shapley values between the two games are additive: $\Phi(G + G')_i = \Phi(G)_i + \Phi(G')_i$, where the game $G + G'$ is defined as $G + G' = (F, V + V')$ with $(V + V')(S) = V(S) + V'(S)$, $S \subseteq F$.

Amazingly, these axioms lead to a single unique and quite intuitive division of utility [10]. Even more, the Shapley value of $i$ turns out to be the expected marginal contribution to a random coalition $S \subseteq F \setminus \{i\}$, where the marginal contribution is $V(S \cup \{i\}) - V(S)$. This can be interpreted as a fair division of utility [11].
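As a small worked illustration (not taken from the paper), consider a hypothetical two-player game with $V(\{1\}) = 1$, $V(\{2\}) = 3$, and $V(\{1, 2\}) = 6$. Player 1's marginal contributions are $V(\{1\}) - V(\emptyset) = 1$ and $V(\{1, 2\}) - V(\{2\}) = 3$, so the expected marginal contribution is $\Phi_1 = \tfrac{1}{2}(1 + 3) = 2$; likewise $\Phi_2 = \tfrac{1}{2}(3 + 5) = 4$. Note that $\Phi_1 + \Phi_2 = 6 = V(F)$, exactly as the efficiency axiom requires.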
2.1.3 Direct Calculation

It can be shown that the following equation gives us the payoff vector for the Shapley value solution concept, whose entries we will call Shapley values [12, 13].

Definition 3 Let $G = (F, V)$; for simplicity's sake, we will now write $\Phi(G)_i$ as $\Phi_i$. Then, the Shapley value of the $i$th factor, $\Phi_i$, can be described as:
$$\Phi_i = \sum_{S \subseteq F \setminus \{i\}} \gamma\big(|F \setminus \{i\}|, |S|\big) \cdot \big(V(S \cup \{i\}) - V(S)\big)$$
where
$$\gamma(n, m) = \frac{1}{\binom{n}{m}(n + 1)} = \frac{m!\,(n - m)!}{(n + 1)!}$$

The Shapley value can be interpreted as a weighted average of contributions. The weights themselves have an intuitive interpretation: the $\frac{1}{\binom{n}{m}}$ results in each possible size of $S$ having an equal impact on the final value (since, given $|S| = m$, there are $\binom{n}{m}$ summands contributing to the final value), and the $\frac{1}{n+1}$ averages between the different sizes of $S$.

2.1.4 Intractability

Unfortunately, in spite of all the desirable attributes of the Shapley value, it has a major weakness: it can be incredibly costly to compute. With the above formulation, one would need to assess $V$ on $2^{|F \setminus \{i\}|}$ different subsets. In general, except for very specific circumstances, there seem to be no clever solutions or reformulations either. Deterministically computing Shapley values in the context of weighted voting games has been shown to be NP-complete [4, 5].
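To make Definition 3 and the cost discussed above concrete, here is a minimal sketch (assuming Python; the three-player value function is hypothetical) that evaluates the formula by brute-force enumeration, which is exactly the $2^{|F \setminus \{i\}|}$ assessments the text warns about.

```python
from itertools import combinations
from math import factorial

def shapley_values(players, V):
    """Directly evaluate Definition 3:
    Phi_i = sum over S ⊆ F\\{i} of gamma(|F\\{i}|, |S|) * (V(S ∪ {i}) - V(S)).
    Exponential in the number of players."""
    n = len(players) - 1                                    # n = |F \ {i}|
    gamma = lambda m: factorial(m) * factorial(n - m) / factorial(n + 1)
    phi = {}
    for i in players:
        others = [p for p in players if p != i]
        total = 0.0
        for r in range(len(others) + 1):                    # all coalition sizes
            for S in combinations(others, r):               # all 2^n subsets
                S = frozenset(S)
                total += gamma(len(S)) * (V(S | {i}) - V(S))
        phi[i] = total
    return phi

# Hypothetical symmetric game: every player should receive V(F)/3 = 3.
V = lambda S: float(len(S) ** 2)
print(shapley_values([1, 2, 3], V))    # each player receives ~3.0 = V(F)/3
```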
Considering that voting games are some of the simplest cooperative games, this result does not bode well for more complex scenarios. In the context of Shapley values for machine learning, it has also been shown that the calculation of Shapley values is not tractable even for regression models [14]. It was also proven that, in an empirical setting, finding Shapley values is exponential [15].

2.2 Explainable AI

The importance of eXplainable AI (XAI) is multifaceted. On the one hand, actually understanding a model's reasoning allows for more robustness. This can be intuited with a simple thought experiment: imagine implementing a traditional program without being able to understand what the computer is doing, where it is literally impossible to debug. If by some miracle you were able to get it working, it certainly would not be robust to edge cases. This is more or less the situation data scientists and engineers are in while developing large black-box models: they are stuck with, at best, naive and heuristic strategies for debugging, without a good way of understanding what the model is doing or why. XAI, in particular post-hoc explanations, can serve as a critical debugging tool [16, 17]. On the other hand, in important, potentially life-altering applications such as medicine, loan decisions, law, and various other critical fields, we cannot afford to rely on AI which we do not understand. This is not only because of the recent legislative shift with the GDPR [18], but for the obvious moral and practical reasons.

3 Theory

3.1 Shapley Values and the Beta Function

In this subsection, the relationship between the beta function and Shapley values is explored.
Given a game with a set of players $F$, a subset $S \subseteq F$, and a player $i$, we have:

Definition 4 Let $n = |F \setminus \{i\}|$ and $m = |S|$. We denote the weights used to calculate the Shapley value in the weighted average as:
$$\gamma(n, m) = \frac{m!\,(n - m)!}{(n + 1)!} = \frac{1}{\binom{n}{m}(n + 1)}$$

Definition 5 Denote a function closely related to the beta function as:
$$B_{\alpha,\beta} = \int_0^1 x^{\beta}(1 - x)^{\alpha - \beta}\,dx, \qquad 0 \le \beta \le \alpha, \quad \alpha, \beta \in \mathbb{N}.$$
We will refer to this function as the special beta function. We also denote
$$b_{\alpha,\beta}(x) = x^{\beta}(1 - x)^{\alpha - \beta}$$
so that $B_{\alpha,\beta} = \int_0^1 b_{\alpha,\beta}(x)\,dx$.

Lemma 1 We have the following recurrence relationship:
$$B_{\alpha,\beta} = \frac{\beta}{\alpha - (\beta - 1)}\,B_{\alpha,\beta-1}, \qquad B_{\alpha,0} = B_{\alpha,\alpha} = \frac{1}{\alpha + 1}$$

Proof Case 1, $\beta = 0$ or $\beta = \alpha$:
$$B_{\alpha,0} = \int_0^1 (1 - x)^{\alpha}\,dx = \left.\frac{-(1 - x)^{\alpha + 1}}{\alpha + 1}\right|_0^1 = \frac{1}{\alpha + 1}$$
A nearly identical calculation can be used to show $B_{\alpha,\alpha} = \frac{1}{\alpha + 1}$.

Case 2, $0 < \beta < \alpha$: integrating by parts,
$$B_{\alpha,\beta} = \int_0^1 x^{\beta}(1 - x)^{\alpha - \beta}\,dx = \left.-\frac{x^{\beta}(1 - x)^{\alpha - (\beta - 1)}}{\alpha - (\beta - 1)}\right|_0^1 + \frac{\beta}{\alpha - (\beta - 1)}\int_0^1 x^{\beta - 1}(1 - x)^{\alpha - (\beta - 1)}\,dx = 0 + \frac{\beta}{\alpha - (\beta - 1)}\,B_{\alpha,\beta-1} \qquad \square$$

Theorem 2 The $B$ function is equivalent to the Shapley weight function $\gamma$:
$$B_{n,m} = \gamma(n, m), \qquad 0 \le m \le n, \quad m, n \in \mathbb{N}$$

Proof Fix $n$; we proceed by induction. Base case, $m = 0$: then $B_{n,0} = \frac{1}{n+1} = \gamma(n, 0)$, so the base case holds. Inductive step: suppose $B_{n,k} = \gamma(n, k)$, $k \in \mathbb{N}$; we need to show $B_{n,k+1} = \gamma(n, k + 1)$ for $0 \le k < n$:
$$B_{n,k+1} = \frac{k + 1}{n - k}\,B_{n,k} = \frac{k + 1}{n - k}\,\gamma(n, k) = \frac{k + 1}{n - k} \cdot \frac{k!\,(n - k)!}{(n + 1)!} = \frac{(k + 1)!\,(n - (k + 1))!}{(n + 1)!} = \gamma(n, k + 1) \qquad \square$$
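The equivalence in Theorem 2 is easy to sanity-check numerically. The sketch below is an illustration only; it assumes Python with SciPy available for quadrature, and simply compares the integral definition of $B_{n,m}$ against $\gamma(n,m)$ for small $n$.

```python
from math import comb, factorial
from scipy.integrate import quad   # assumption: SciPy is available

def special_beta(alpha, beta):
    """B_{alpha,beta} = integral over [0,1] of x^beta (1-x)^(alpha-beta) dx."""
    value, _ = quad(lambda x: x**beta * (1 - x)**(alpha - beta), 0.0, 1.0)
    return value

def gamma_weight(n, m):
    """Shapley weight gamma(n, m) = m!(n-m)!/(n+1)! = 1/(C(n,m)(n+1))."""
    return factorial(m) * factorial(n - m) / factorial(n + 1)

for n in range(6):
    for m in range(n + 1):
        assert abs(special_beta(n, m) - gamma_weight(n, m)) < 1e-7
        assert abs(gamma_weight(n, m) - 1 / (comb(n, m) * (n + 1))) < 1e-12
print("B_{n,m} agrees with gamma(n,m) for all 0 <= m <= n <= 5")
```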
Fig. 1 Visual representation of the special beta function being approximated using Darboux integrals and our novel partition.

To summarize, we have shown that our formulation of the beta function is equivalent to the Shapley value weight function over our domain.

3.2 Approximating the Special Beta Function

In this section, we will go through the task of showing that we can approximate the beta function using fairly unusual partitions. Though it may not be immediately obvious, our partition definition is extremely convenient for a quantum implementation. For the moment, it is sufficient to understand this section as pursuing a single goal: showing that our partition can be used to approximate the area under $b_{n,m}$ over the range $[0, 1]$, which is equal to the special beta function. In fact, we will show that we can estimate $B_{n,m}$ with arbitrary accuracy. For a visual representation of our goal, we will be estimating the area under $b_{n,m}$ over the range $[0, 1]$ using our unusual partition for a Darboux integral, as can be seen at various resolutions in Figure 1.

3.2.1 Partition

To begin, we consider a simple function which, as we will see later, is extremely natural in the quantum context.

Remark 1 Consider the following function from the real numbers in the range $0 \le x \le 1$ to the reals:
$$\sin^2\!\left(\frac{\pi}{2} x\right) \tag{1}$$
As can be seen in Figure 2, $\sin^2\!\left(\frac{\pi}{2} x\right)$ is clearly monotonic and bijective from the domain $[0, 1]$ to the range $[0, 1]$.
[Figure 1 panels: binomial area approximation for $(n, m) = (4, 1)$ at resolutions 4, 8, 16, and 32.]
Fig. 2 Graph of the function $\sin^2\!\left(\frac{\pi}{2} x\right)$.

Fig. 3 Visualization of partition $P_\ell$, $\ell = 2$.
Fig. 4 Visualization of partition $P_\ell$, $\ell = 3$.

Definition 6 Let $\ell$ be a non-negative integer, and let $P_\ell = \left\{ t_\ell(0), t_\ell(1), \ldots, t_\ell(2^\ell - 1), t_\ell(2^\ell) \right\}$ be a partition of the interval $[0, 1]$ where
$$t_\ell(k) = \sin^2\!\left(\frac{\pi}{2} \cdot \frac{k}{2^\ell}\right)$$
Note that $t_\ell(k)$ can be interpreted as a discretized version of the function in Remark 1, where instead of $x \in [0, 1]$ we have $x \in \left\{ \frac{k}{2^\ell} : k \in \mathbb{N} \right\} \subset [0, 1]$ for some fixed $\ell$.

Remark 2 $P_{\ell+1}$ is a refinement of $P_\ell$.

Example 1 In Figures 3 and 4, we can see concrete examples of how $t_\ell(k)$ partitions the interval $[0, 1]$. Note that $P_3$ has a point corresponding to each point in $P_2$; specifically, $t_2(i) = t_3(2i)$, $i \in \mathbb{N}$. This behaviour is due to the aforementioned refinement relationship between $P_\ell$ and $P_{\ell+1}$. It is also worth noting how $P_3$'s intervals can be viewed as the intervals in $P_2$ split in two.
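As a small numerical illustration (not from the paper; it assumes NumPy, and the choice $(n, m) = (4, 1)$ simply mirrors Figure 1), the partition of Definition 6 can be built directly and used in a Riemann-type sum for $B_{n,m}$, which approaches $\gamma(4, 1) = \frac{1}{20}$ as $\ell$ grows.

```python
import numpy as np
from math import factorial

def partition(ell):
    """Partition points t_ell(k) = sin^2(pi/2 * k/2^ell), k = 0..2^ell."""
    k = np.arange(2**ell + 1)
    return np.sin(np.pi / 2 * k / 2**ell) ** 2

def riemann_over_partition(n, m, ell):
    """Left-endpoint Riemann sum of b_{n,m}(x) = x^m (1-x)^(n-m) over P_ell."""
    pts = partition(ell)
    widths = np.diff(pts)          # w_ell(k) = t_ell(k+1) - t_ell(k)
    left = pts[:-1]
    return float(np.sum(widths * left**m * (1 - left)**(n - m)))

gamma = lambda n, m: factorial(m) * factorial(n - m) / factorial(n + 1)

for ell in (4, 8, 12):
    approx = riemann_over_partition(4, 1, ell)
    print(f"ell={ell:2d}  sum={approx:.6f}  target gamma(4,1)={gamma(4, 1):.6f}")
```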
Lemma 3 We have the following properties of the function $t_\ell(k)$:

1. This represents a kind of symmetry with respect to $k = 2^{\ell-1}$:
$$t_\ell(k) = 1 - t_\ell\!\left(2^\ell - k\right)$$

2. $t_\ell(k) = t_{\ell+1}(2k)$

Proof Property 1:
$$t_\ell(2^\ell - k) = \sin^2\!\left(\frac{\pi}{2} \cdot \frac{2^\ell - k}{2^\ell}\right) = \sin^2\!\left(\frac{\pi}{2}\left(1 - \frac{k}{2^\ell}\right)\right) = \sin^2\!\left(\frac{\pi}{2}\left(-\frac{k}{2^\ell}\right) + \frac{\pi}{2}\right)$$
$$= \cos^2\!\left(\frac{\pi}{2}\left(-\frac{k}{2^\ell}\right)\right) \qquad \left(\cos(x) = \sin\!\left(x + \frac{\pi}{2}\right)\right)$$
$$= \cos^2\!\left(\frac{\pi}{2} \cdot \frac{k}{2^\ell}\right) \qquad \left(\cos(x) = \cos(-x)\right)$$
$$= 1 - \sin^2\!\left(\frac{\pi}{2} \cdot \frac{k}{2^\ell}\right) \qquad \left(\cos^2(x) = 1 - \sin^2(x)\right)$$
$$= 1 - t_\ell(k)$$

Property 2:
$$t_{\ell+1}(2k) = \sin^2\!\left(\frac{\pi}{2} \cdot \frac{2k}{2^{\ell+1}}\right) = \sin^2\!\left(\frac{\pi}{2} \cdot \frac{k}{2^\ell}\right) = t_\ell(k) \qquad \square$$
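A quick numerical check of both properties of Lemma 3 (illustrative only; assumes NumPy):

```python
import numpy as np

def t(ell, k):
    """Partition point t_ell(k) = sin^2(pi/2 * k/2^ell) from Definition 6."""
    return np.sin(np.pi / 2 * np.asarray(k) / 2**ell) ** 2

for ell in range(1, 8):
    k = np.arange(2**ell + 1)
    assert np.allclose(t(ell, k), 1 - t(ell, 2**ell - k))   # Property 1 (symmetry)
    assert np.allclose(t(ell, k), t(ell + 1, 2 * k))        # Property 2 (refinement)
print("Lemma 3 holds numerically for ell = 1..7")
```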
It will be useful to define a width for each sub-interval $[t_\ell(k), t_\ell(k + 1)]$.

Definition 7 We denote the width of a sub-interval $[t_\ell(k), t_\ell(k + 1)]$ in a partition $P_\ell$ as:
$$w_\ell(k) = t_\ell(k + 1) - t_\ell(k)$$
$w_\ell(k)$ can be interpreted as a function from $k \in \mathbb{N}$, $0 \le k \le 2^\ell - 1$, to $\mathbb{R}$. This width varies with respect to both $k$ and $\ell$: increasing $\ell$ decreases $w_\ell(k)$, and middling values of $k$ maximize $w_\ell(k)$, as is apparent in Figures 1 and 5.

Example 2 In Figure 5, we can see a visual representation of $w_2(k)$ for $k = 0, 1, 2, 3$.

Fig. 5 Visualization of $w_2(k)$, $k = 0, 1, 2, 3$, over partition $P_2$.

When refining our partition from $P_\ell$ to $P_{\ell+1}$, each sub-interval $[t_\ell(k), t_\ell(k + 1)]$ is split into two, $[t_{\ell+1}(2k), t_{\ell+1}(2k + 1)]$ and $[t_{\ell+1}(2k + 1), t_{\ell+1}(2k + 2)]$ (recall that, by Lemma 3, Property 2, $t_\ell(k) = t_{\ell+1}(2k)$). The relative sizes of the original sub-interval and the new ones are of critical importance.

Definition 8 The previous-to-new-left-interval ratio $\rho$ is defined as:
$$\rho_{\ell-1}(k) = \frac{w_\ell(2k)}{w_\ell(2k) + w_\ell(2k + 1)}$$
$\rho_{\ell-1}(k)$ can also be interpreted as a function with $k \in \mathbb{N}$, $0 \le k \le 2^{\ell-1} - 1$. It represents how the sizes of intervals are modified during a refinement from $P_{\ell-1}$ to $P_\ell$.

Remark 3 The previous-to-new-left-interval ratio $\rho$ can equivalently be represented as:
$$\rho_{\ell-1}(k) = \frac{t_\ell(2k + 1) - t_\ell(2k)}{t_\ell(2k + 2) - t_\ell(2k)} = \frac{w_\ell(2k)}{w_{\ell-1}(k)}$$

Example 3 Let us consider $P_1$, equal to the partition $(0, 0.5, 1)$. When we refine to $P_2$, the first interval of $P_1$, $\mathrm{interval}_{\mathrm{old}} = [0, 0.5]$, is split into two new intervals, $\mathrm{interval}_{\mathrm{left}} = [0, 0.15]$ and $\mathrm{interval}_{\mathrm{right}} = [0.15, 0.5]$
(note that $\mathrm{interval}_{\mathrm{old}} = \mathrm{interval}_{\mathrm{left}} \cup \mathrm{interval}_{\mathrm{right}}$). Consequently, the previous-to-new-left-interval ratio $\rho_1(0)$ is equal to $\frac{w_2(0)}{w_2(0) + w_2(1)}$, which is approximately $0.3$; see Figure 5. $\rho_1(0)$ represents the relative size of the new left interval, $\mathrm{interval}_{\mathrm{left}} = [0, 0.15]$, compared to the old interval $\mathrm{interval}_{\mathrm{old}} = [0, 0.5]$.

Corollary 3.1 The first partition refinement, $P_0 \to P_1$, splits the interval into two parts, which happen to be of equal size:
$$\rho_0(0) = \frac{1}{2}$$
This can be verified easily through basic calculation.

Lemma 4 As our partition approaches infinite density, the leftmost interval $[0, b]$ is split into two pieces $[0, a]$ and $[a, b]$ with $a$ approaching $\frac{b}{4}$. Equivalently,
$$\lim_{\ell \to \infty} \rho_{\ell-1}(0) = \frac{1}{4}$$

Proof
$$\lim_{\ell \to \infty} \rho_{\ell-1}(0) = \lim_{\ell \to \infty} \frac{t_\ell(1) - t_\ell(0)}{t_\ell(2) - t_\ell(0)} = \lim_{\ell \to \infty} \frac{\sin^2\!\left(\frac{\pi}{2} \cdot \frac{1}{2^\ell}\right)}{\sin^2\!\left(\frac{\pi}{2} \cdot \frac{2}{2^\ell}\right)} = \lim_{\ell \to \infty} \frac{\left(\frac{\pi}{2} \cdot \frac{1}{2^\ell}\right)^2}{\left(\frac{\pi}{2} \cdot \frac{2}{2^\ell}\right)^2} = \frac{1}{4} \qquad \square$$

Lemma 5 $\rho_\ell(0)$ monotonically decreases as $\ell$ increases, for $\ell \ge 0$.

Proof Let $x = \frac{2}{\pi} \cdot 2^\ell$. Then,
$$\rho_{\ell-1}(0) = \frac{t_\ell(1) - t_\ell(0)}{t_\ell(2) - t_\ell(0)} = \frac{\sin^2\!\left(\frac{\pi}{2} \cdot \frac{1}{2^\ell}\right) - \sin^2(0)}{\sin^2\!\left(\frac{\pi}{2} \cdot \frac{2}{2^\ell}\right) - \sin^2(0)} = \frac{\sin^2\!\left(\frac{1}{x}\right)}{\sin^2\!\left(\frac{2}{x}\right)}$$
Define $h(x) = \frac{\sin^2(1/x)}{\sin^2(2/x)}$, $x \in \left[\frac{2}{\pi}, \infty\right)$; our result will hold if $h(x)$ decreases monotonically as $x$ increases over its domain. This is true when $\frac{d}{dx} h(x) < 0$.
We have
$$\frac{d}{dx} h(x) = \frac{-2 \sin\!\left(\frac{1}{x}\right)\left[\cos\!\left(\frac{1}{x}\right)\sin\!\left(\frac{2}{x}\right) - 2\sin\!\left(\frac{1}{x}\right)\cos\!\left(\frac{2}{x}\right)\right]}{x^2 \sin^3\!\left(\frac{2}{x}\right)}$$
Note that for $x \ge \frac{2}{\pi}$,
$$\frac{2 \sin\!\left(\frac{1}{x}\right)}{x^2 \sin^3\!\left(\frac{2}{x}\right)} > 0$$
Thus, $\frac{d}{dx} h(x) < 0$ when
$$\cos\frac{1}{x}\sin\frac{2}{x} - 2\sin\frac{1}{x}\cos\frac{2}{x} > 0$$
We can continue as follows, using the double-angle identities:
$$\cos\frac{1}{x}\sin\frac{2}{x} - 2\sin\frac{1}{x}\cos\frac{2}{x} = \cos\frac{1}{x}\left(2\sin\frac{1}{x}\cos\frac{1}{x}\right) - 2\sin\frac{1}{x}\left(2\cos^2\frac{1}{x} - 1\right) = 2\sin\frac{1}{x}\cos^2\frac{1}{x} - 4\sin\frac{1}{x}\cos^2\frac{1}{x} + 2\sin\frac{1}{x} = 2\sin\frac{1}{x}\left(1 - \cos^2\frac{1}{x}\right) = 2\sin^3\frac{1}{x} > 0$$
This is trivially true for $x \ge \frac{2}{\pi}$. Hence, $\frac{d}{dx} h(x) < 0$, and as a result $\rho_\ell(0)$ monotonically decreases as $\ell$ increases. $\square$

Corollary 5.1 By Corollary 3.1, $\rho_0(0) = \frac{1}{2}$, and by Lemma 4, $\lim_{\ell \to \infty} \rho_\ell(0) = \frac{1}{4}$. Thus, since $\rho_\ell(0)$ is monotonically decreasing with respect to $\ell$ (Lemma 5), it is clear that
$$\rho_\ell(0) \in \left(\frac{1}{4}, \frac{1}{2}\right]$$

Lemma 6 Similarly, we have $\rho_\ell(2^\ell - 1) \in \left[\frac{1}{2}, \frac{3}{4}\right)$.

Proof Let us begin by showing that
$$\rho_\ell(2^\ell - 1) = 1 - \rho_\ell(0)$$
Plugging in $k = 2^\ell - 1$ and using the symmetry of Lemma 3, Property 1,
$$\rho_\ell(2^\ell - 1) = \frac{t_{\ell+1}(2^{\ell+1} - 1) - t_{\ell+1}(2^{\ell+1} - 2)}{t_{\ell+1}(2^{\ell+1}) - t_{\ell+1}(2^{\ell+1} - 2)} = \frac{\big(1 - t_{\ell+1}(1)\big) - \big(1 - t_{\ell+1}(2)\big)}{1 - \big(1 - t_{\ell+1}(2)\big)} = \frac{t_{\ell+1}(2) - t_{\ell+1}(1)}{t_{\ell+1}(2)} = 1 - \frac{t_{\ell+1}(1)}{t_{\ell+1}(2)} = 1 - \rho_\ell(0)$$
As was shown in Corollary 5.1, $\rho_\ell(0) \in \left(\frac{1}{4}, \frac{1}{2}\right]$, thus $\rho_\ell(2^\ell - 1) \in \left[\frac{1}{2}, \frac{3}{4}\right)$. $\square$

So we have established the bounds of $\rho_\ell$ at the extreme values of its domain. The next step will be to show that $\rho_\ell(k) < \rho_\ell(k + 1)$ for all valid $k$. This will give us a chain of inequalities which bounds all of the $\rho$ values.
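The behaviour of $\rho$ described above, together with the monotonicity established in the lemmas that follow, is easy to observe numerically. The following sketch is illustrative only and assumes NumPy.

```python
import numpy as np

def t(ell, k):
    """Partition point t_ell(k) = sin^2(pi/2 * k/2^ell)."""
    return np.sin(np.pi / 2 * np.asarray(k) / 2**ell) ** 2

def rho(ell):
    """rho_ell(k) = w_{ell+1}(2k) / (w_{ell+1}(2k) + w_{ell+1}(2k+1)), k = 0..2^ell-1."""
    w = np.diff(t(ell + 1, np.arange(2**(ell + 1) + 1)))   # widths of P_{ell+1}
    return w[0::2] / (w[0::2] + w[1::2])

for ell in range(1, 10):
    r = rho(ell)
    assert np.all(np.diff(r) > 0)        # rho_ell(k) < rho_ell(k+1) for all valid k
    assert np.isclose(r[-1], 1 - r[0])   # rho_ell(2^ell - 1) = 1 - rho_ell(0) (Lemma 6)
    assert 0.25 < r[0] and r[-1] < 0.75  # bounds from Corollary 5.1 and Lemma 6
print("rho_ell(k) is increasing in k and stays within (1/4, 3/4) for ell = 1..9")
```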
Lemma 7 The following statement relates an inequality of widths to an inequality of previous-to-new-left-interval ratios:
$$\frac{w_\ell(2k)}{w_\ell(2k + 1)} < \frac{w_\ell(2k + 2)}{w_\ell(2k + 3)} \iff \rho_{\ell-1}(k) < \rho_{\ell-1}(k + 1)$$

Proof Applying some basic algebra,
$$\frac{w_\ell(2k)}{w_\ell(2k + 1)} < \frac{w_\ell(2k + 2)}{w_\ell(2k + 3)} \iff w_\ell(2k)\,w_\ell(2k + 3) < w_\ell(2k + 2)\,w_\ell(2k + 1)$$
Adding $w_\ell(2k)\,w_\ell(2k + 2)$ (which is $> 0$ by Remark 4) to both sides results in
$$w_\ell(2k)\,w_\ell(2k + 2) + w_\ell(2k)\,w_\ell(2k + 3) < w_\ell(2k)\,w_\ell(2k + 2) + w_\ell(2k + 2)\,w_\ell(2k + 1)$$
which can be factored into
$$w_\ell(2k)\big(w_\ell(2k + 2) + w_\ell(2k + 3)\big) < w_\ell(2k + 2)\big(w_\ell(2k) + w_\ell(2k + 1)\big)$$
Rearranging gives
$$\frac{w_\ell(2k)}{w_\ell(2k) + w_\ell(2k + 1)} < \frac{w_\ell(2k + 2)}{w_\ell(2k + 2) + w_\ell(2k + 3)} \tag{2}$$
Finally, by the definition of the previous-to-new-left-interval ratio $\rho$,
$$\rho_{\ell-1}(k) < \rho_{\ell-1}(k + 1) \qquad \square$$

This relation will be helpful in showing $\rho_{\ell-1}(k) < \rho_{\ell-1}(k + 1)$, as $w_\ell$ is an easier function to work with.

Definition 9 Fix $\ell$, and define the following real-number extensions $T(x)$ of $t_\ell$ and $W(x)$ of $w_\ell$, where the domains and codomains of $T(x)$ and $W(x)$ are $\mathbb{R}$:
$$T(x) = \sin^2\!\left(\frac{\pi}{2} \cdot \frac{x}{2^\ell}\right)$$
and
$$W(x) = T(x + 1) - T(x)$$
where $x \in [0, 2^\ell - 1]$ is real.

Corollary 7.1 Fix $\ell$. If $k \in \mathbb{N}$, $0 \le k \le 2^\ell$, then $T(k) = t_\ell(k)$, and (for $k \le 2^\ell - 1$) $W(k) = w_\ell(k)$.

With $w_\ell$ and $t_\ell$ extended to the real numbers, it is possible to apply tools from calculus, making the next few proofs substantially easier.

Remark 4 Clearly, $T(x)$ increases as $x$ increases.
As a result, $W(x) > 0$ for $x \in [0, 2^\ell - 1]$.

Remark 5 With the new function $W$, it is possible to extend the expression seen in Lemma 7:
$$\frac{W(x)}{W(x + 1)} < \frac{W(x + 2)}{W(x + 3)}, \qquad x \in \left[0, 2^\ell - 1\right]$$
With Corollary 7.1 in mind, it is easy to see that if this relation holds in the extended context, then it will also hold for the corresponding relation with $w_\ell$.

Remark 6 The relation can be simplified further by considering
$$\frac{W(x)}{W(x + 1)} < \frac{W(x + 1)}{W(x + 2)}$$
Assuming this holds for all $x$, chaining the inequality at $x$ and at $x + 2$ gives
$$\frac{W(x)}{W(x + 1)} < \frac{W(x + 1)}{W(x + 2)} \implies \frac{W(x)}{W(x + 1)} < \frac{W(x + 2)}{W(x + 3)}$$
So proving $\frac{W(x)}{W(x+1)} < \frac{W(x+1)}{W(x+2)}$ will be sufficient to show $\rho_\ell(k) < \rho_\ell(k + 1)$.

Lemma 8 Fix $\ell$. The derivative of $W(x)$ is greater than the derivative of $W(x + 1)$:
$$\frac{d}{dx} W(x) > \frac{d}{dx} W(x + 1), \qquad x \in \left[0, 2^\ell - 1\right]$$

Proof We first find $\frac{d}{dx} W(x + r)$ for an arbitrary $r \in \mathbb{R}$. With $\alpha = \frac{\pi}{2^\ell}$,
$$\frac{d}{dx} T(x + r) = \frac{d}{dx} \sin^2\!\left(\frac{\pi}{2}\left(\frac{x + r}{2^\ell}\right)\right) = \frac{d}{dx} \sin^2\!\left(\frac{\alpha}{2}(x + r)\right) = \frac{\alpha}{2} \sin\big(\alpha(x + r)\big)$$
Thus, we have
$$\frac{d}{dx} W(x + r) = \frac{d}{dx} T(x + r + 1) - \frac{d}{dx} T(x + r) = \frac{\alpha}{2}\Big(\sin\big(\alpha(x + r + 1)\big) - \sin\big(\alpha(x + r)\big)\Big)$$
Finally, we show:
$$\frac{d}{dx} W(x) > \frac{d}{dx} W(x + 1) \iff \frac{\alpha}{2}\big(\sin(\alpha(x + 1)) - \sin(\alpha x)\big) > \frac{\alpha}{2}\big(\sin(\alpha(x + 2)) - \sin(\alpha(x + 1))\big) \iff 2\sin(\alpha(x + 1)) - \sin(\alpha x) - \sin(\alpha(x + 2)) > 0$$
Let $u = \alpha x$ and $v = \alpha$. Then,
+page_content='= 2 sin u cos v + 2 cos u sin v − sin u ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='− 2 sin u cos2 v + sin u − 2 sin v cos2 v ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='= 2 sin u cos v(1 − cos v) + 2 cos u sin v(1 − cos v) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='= 2(sin u cos v + cos u sin v)(1 − cos v) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='= 2 sin(u + v)(1 − cos v) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='= 2 sin(αx + α)(1 − cos α) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='= 2 sin ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='π · x + 1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='2ℓ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='� � ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='1 − cos π ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='2ℓ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='It is easily shown that 1 − cos π ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='2ℓ > 0 for ℓ ≥ 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' thus,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' the result holds when: sin � π x + 1 2ℓ � > 0 This is equivalent to when 0 < x+1 2ℓ < 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Hence, d dxW(x) > d dxW(x + 1), if x ∈ [0, 2L − 1] □ Lemma 9 In the real domain, the ratio of the previous width to the current width increases monotonically, that is, W(x) W(x + 1) < W(x + 1) W(x + 2), x = � 0, 2ℓ − 1 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' 15 Proof By lemma 8, the derivative of current width W ′(x + 1) is lower than the derivative of the previous width W ′(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' This implies that, � x+2 x+1 W ′(z)dz < � x+1 x W ′(z)dz.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Let ∆W(x) be equal to � x+1 x W ′(z)dz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Since W(x) > 0 (see Remark 4), multiplying both sides by W(x), it is apparent that W(x)∆W(x + 1) is less than W(x)∆W(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Adding W(x)2 + W(x)∆W(x) to both sides and ∆W(x)2 (which is > 0) to the right hand side shows that W(x)2 + W(x)∆W(x) + W(x)∆W(x + 1) is less than W(x)2 + 2W(x)∆W(x) + ∆W(x)2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Factoring gives, W(x) [W(x) + ∆W(x) + ∆W(x + 1)] < (W(x) + ∆W(x))2 Note that W(x) + ∆W(x) is equal to � x 0 W ′(z)dz + � x+1 x W ′(z)dz which is equal to � x+1 0 W ′(z)dz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' By a similar argument it can shown that W(x)+∆W(x)+∆W(x+1) is equal to � x+2 0 W ′(z)dz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Hence, it follows that, W(x) � x+2 0 W ′(z)dz < �� x+1 0 W ′(z)dz �2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' By the fundamental theorem of calculus, W(x)W(x + 2) is less than W(x + 1)2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Rearranging yields, W(x) W(x + 1) < W(x + 1) W(x + 2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' □ Corollary 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='1 The following relation holds, ρℓ(k) < ρℓ(k + 1) for all k ∈ [0, 2ℓ − 2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Proof This is a direct result of Lemmas 7 and 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' □ Theorem 10 For all ℓ, k ∈ N, 0 ≤ k ≤ 2ℓ − 1, ρℓ(k) is bounded with, ρℓ(k) ∈ �1 4 + ϵ, 3 4 − ϵ � , ϵ > 0 Proof By Corollary 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='1 we have ρℓ(0) ∈ � 1 4 + ϵ, 1 2 � , thus ρℓ(0) > 1 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' By Lemma 6 we have ρℓ(2ℓ − 1) ∈ � 1 2, 3 4 − ϵ � , so ρℓ(2ℓ − 1) < 3 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content=' Finally, by Corollary 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/S9E3T4oBgHgl3EQfzQuQ/content/2301.04727v1.pdf'} +page_content='1, we have ρℓ(k) < ρℓ(k + 1) for the current range of k = 0, .' 
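The bound of Theorem 10 and the monotonicity of Corollary 9.1 are easy to check numerically. The following is a minimal sketch, not part of the paper, using the partition points $t_\ell(k) = T(k) = \sin^2(\pi k/2^{\ell+1})$ from Definition 9 and Corollary 7.1, and the ratio $\rho_{\ell}(k) = w_{\ell+1}(2k)/(w_{\ell+1}(2k)+w_{\ell+1}(2k+1))$ consistent with the expression used later in the proof of Lemma 13.

    import numpy as np

    def t(ell, k):
        # partition points t_ell(k) = sin^2( (pi/2) * k / 2^ell )
        return np.sin(0.5 * np.pi * k / 2**ell) ** 2

    def w(ell, k):
        # subinterval widths w_ell(k) = t_ell(k+1) - t_ell(k)
        return t(ell, k + 1) - t(ell, k)

    def rho(ell, k):
        # previous-to-new-left-interval ratio when refining P_ell to P_{ell+1}
        return w(ell + 1, 2 * k) / (w(ell + 1, 2 * k) + w(ell + 1, 2 * k + 1))

    ell = 6
    ratios = np.array([rho(ell, k) for k in range(2**ell)])
    assert np.all(np.diff(ratios) > 0)                    # Corollary 9.1
    assert ratios.min() > 0.25 and ratios.max() < 0.75    # Theorem 10
    print(ratios.min(), ratios.max())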
3.2.2 Area Estimation

Various important properties of the partitioning scheme $P_\ell$ have been the primary focus so far. This section focuses on estimating the area under $b_{n,m}$ given a partition $P_\ell$, using concepts from Darboux integrals. The end goal is to show that the error can become arbitrarily small given a large enough $\ell$. This section also lays the foundations for a better and more realistic upper bound on the error in estimating Shapley values, which is covered in a later section.

Definition 10. The supremum of a set $S \subseteq \mathbb{R}$ is
$$\sup(S) = \min\{x \in \mathbb{R} : x \ge s\ \ \forall s \in S\},$$
and the infimum of $S$ is
$$\inf(S) = \max\{x \in \mathbb{R} : x \le s\ \ \forall s \in S\}.$$

Definition 11. Darboux sums take a partition $P = (z_0, z_1, \dots, z_n)$ of an interval $[a,b]$, where $a = z_0 < z_1 < \cdots < z_n = b$, and a function $f$ which maps $(a,b)$ to $\mathbb{R}$. Each interval $[z_i, z_{i+1}]$ is called a subinterval. Let
$$M_i = \sup_{x \in [z_i, z_{i+1}]} f(x), \qquad m_i = \inf_{x \in [z_i, z_{i+1}]} f(x), \qquad i = 0, \dots, n-1.$$
The upper and lower bounds of a subinterval's area are
$$A_U(f, [z_i, z_{i+1}]) = (z_{i+1} - z_i)\,M_i \quad\text{and}\quad A_L(f, [z_i, z_{i+1}]) = (z_{i+1} - z_i)\,m_i,$$
respectively. The upper Darboux sum is
$$U(f, P) = \sum_{i=0}^{n-1} A_U(f, [z_i, z_{i+1}]),$$
and the lower Darboux sum is
$$L(f, P) = \sum_{i=0}^{n-1} A_L(f, [z_i, z_{i+1}]).$$

There is a geometric interpretation of the Darboux sums. Each subinterval has a rectangle whose width corresponds to the subinterval width and whose height corresponds to either the supremum or the infimum of $f(x)$. The upper Darboux sum is the sum of the areas of these rectangles when their heights correspond to the suprema.

Remark 7. Suppose that instead of taking $M_i$ or $m_i$, we took arbitrary elements from each subinterval to represent the heights of the rectangles:
$$\sum_{i=0}^{n-1} (z_{i+1} - z_i)\,f(x_i), \qquad x_i \in [z_i, z_{i+1}]. \qquad (3)$$
By the definitions of supremum and infimum (Definition 10), it is clear that for all $i = 0, \dots, n-1$, $m_i \le f(x_i) \le M_i$. It follows that for all $i$, the areas $A_L(f, [z_i, z_{i+1}])$ are less than or equal to the areas $(z_{i+1} - z_i)f(x_i)$, which are less than or equal to the areas $A_U(f, [z_i, z_{i+1}])$. This gives
$$L(f, P) \le \sum_{i=0}^{n-1} (z_{i+1} - z_i)\,f(x_i) \le U(f, P).$$
As a result, there is a lot of freedom in choosing at which point of each subinterval to evaluate $f(x)$.

Lemma 11. For any subinterval $[z_i, z_{i+1}]$ and any function $f$ that maps elements of $[z_i, z_{i+1}]$ to $\mathbb{R}$,
$$A_L(f, [z_i, z_{i+1}]) \le \int_{z_i}^{z_{i+1}} f(x)\,dx \le A_U(f, [z_i, z_{i+1}]).$$

Proof. Consider $A_L(f, [z_i, z_{i+1}])$, which equals $(z_{i+1} - z_i)\,m_i$ where $m_i = \inf_{x \in [z_i, z_{i+1}]} f(x)$. Note that
$$(z_{i+1} - z_i)\,m_i = \int_{z_i}^{z_{i+1}} m_i\,dx.$$
By Definition 10, for all $x \in [z_i, z_{i+1}]$, $f(x)$ is greater than or equal to $m_i$. Thus
$$\int_{z_i}^{z_{i+1}} f(x)\,dx \ge \int_{z_i}^{z_{i+1}} m_i\,dx = A_L(f, [z_i, z_{i+1}]).$$
A nearly identical argument can be used to show $\int_{z_i}^{z_{i+1}} f(x)\,dx \le A_U(f, [z_i, z_{i+1}])$. □

The next lemma bounds the error that results from estimating the area over a subinterval by picking an arbitrary point on that subinterval and forming a rectangle.

Lemma 12. Take an arbitrary point $y$ in the subinterval $[z_i, z_{i+1}]$. Then
$$\Bigg|\int_{z_i}^{z_{i+1}} f(x)\,dx - (z_{i+1} - z_i)\,f(y)\Bigg| \le A_U(f, [z_i, z_{i+1}]) - A_L(f, [z_i, z_{i+1}]).$$

Proof. Recall that by Definition 11, $A_L(f, [z_i, z_{i+1}]) = (z_{i+1} - z_i)\,m_i$ and $A_U(f, [z_i, z_{i+1}]) = (z_{i+1} - z_i)\,M_i$, where
$$m_i = \inf_{x \in [z_i, z_{i+1}]} f(x) \quad\text{and}\quad M_i = \sup_{x \in [z_i, z_{i+1}]} f(x).$$
By Definition 10, $m_i \le f(y) \le M_i$, which implies $A_L(f, [z_i, z_{i+1}]) \le (z_{i+1} - z_i)f(y) \le A_U(f, [z_i, z_{i+1}])$. Assume first that $(z_{i+1} - z_i)f(y)$ is less than or equal to $\int_{z_i}^{z_{i+1}} f(x)\,dx$, so that
$$\Bigg|\int_{z_i}^{z_{i+1}} f(x)\,dx - (z_{i+1} - z_i)f(y)\Bigg| = \int_{z_i}^{z_{i+1}} f(x)\,dx - (z_{i+1} - z_i)f(y).$$
Then, since Lemma 11 shows $\int_{z_i}^{z_{i+1}} f(x)\,dx \le A_U(f, [z_i, z_{i+1}])$, it follows that
$$\int_{z_i}^{z_{i+1}} f(x)\,dx - (z_{i+1} - z_i)f(y) \le A_U(f, [z_i, z_{i+1}]) - (z_{i+1} - z_i)f(y).$$
As shown above, $(z_{i+1} - z_i)f(y)$ is also greater than or equal to $A_L(f, [z_i, z_{i+1}])$. As a result,
$$A_U(f, [z_i, z_{i+1}]) - (z_{i+1} - z_i)f(y) \le A_U(f, [z_i, z_{i+1}]) - A_L(f, [z_i, z_{i+1}]).$$
A similar argument shows that the bound also holds when $(z_{i+1} - z_i)f(y)$ is greater than or equal to $\int_{z_i}^{z_{i+1}} f(x)\,dx$. □

Remark 8. Consider the following approach to approximating the area under $f$ on the interval $[z_i, z_{i+1}]$: choose an arbitrary $x \in [z_i, z_{i+1}]$ and multiply $f(x)$ by the width of the interval. Clearly, in this context the error equals $\big|(z_{i+1} - z_i)f(x) - \int_{z_i}^{z_{i+1}} f(x)\,dx\big|$.
By Lemma 12, it follows that $A_U(f, [z_i, z_{i+1}]) - A_L(f, [z_i, z_{i+1}])$ can be used as an upper bound for the error of this approach.

Corollary 12.1. Given a partition $P_\ell = \big(t_\ell(0), \dots, t_\ell(2^\ell)\big)$, the upper bound of the error when approximating the area under $b_{n,m}(x)$ over the $k$th subinterval is defined as
$$UE_{n,m}(\ell, k) = \big(t_\ell(k+1) - t_\ell(k)\big)\Big(U\big(b_{n,m}, [t_\ell(k), t_\ell(k+1)]\big) - L\big(b_{n,m}, [t_\ell(k), t_\ell(k+1)]\big)\Big).$$

Remark 9. Note that $b_{n,m}(x)$ has at most one local maximum for $x \in [0,1]$, for all valid $n, m$. As a result, given a partition $P$ of $[0,1]$, on every subinterval of $P$ (except the single subinterval containing the local maximum), $b_{n,m}(x)$ is either monotonically increasing or decreasing. For each subinterval $[z_i, z_{i+1}]$ of $P$ on which $b_{n,m}(x)$ is monotonically increasing, $L(b_{n,m}, [z_i, z_{i+1}])$ equals $b_{n,m}(z_i)$ and $U(b_{n,m}, [z_i, z_{i+1}])$ equals $b_{n,m}(z_{i+1})$. When $b_{n,m}(x)$ is decreasing over $[z_i, z_{i+1}]$, $L(b_{n,m}, [z_i, z_{i+1}])$ equals $b_{n,m}(z_{i+1})$ and $U(b_{n,m}, [z_i, z_{i+1}])$ equals $b_{n,m}(z_i)$.

Corollary 12.2. Given a partition $P_\ell = \big(t_\ell(0), \dots, t_\ell(2^\ell)\big)$, the following monotonic-assumption upper bound of the error when approximating the area under $b_{n,m}(x)$ over the $k$th subinterval is defined as
$$\overline{UE}_{n,m}(\ell, k) = \big(t_\ell(k+1) - t_\ell(k)\big)\,\big|b_{n,m}(t_\ell(k+1)) - b_{n,m}(t_\ell(k))\big|.$$
This definition makes it easy to find an error upper bound for all subintervals except one: the subinterval containing the local maximum.
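The monotonic-assumption bound of Corollary 12.2 is straightforward to evaluate numerically. The sketch below is a minimal illustration, not taken from the paper; it assumes that $b_{n,m}$ is the Bernstein-type basis function $\binom{n}{m}x^m(1-x)^{n-m}$ (consistent with the maximum at $x = m/n$ stated in Remark 10, but an assumption here, since $b_{n,m}$ is defined earlier in the paper), together with the $\sin^2$ partition points of Definition 9.

    import numpy as np
    from math import comb

    def b(n, m, x):
        # assumed form of b_{n,m}: Bernstein basis binom(n,m) x^m (1-x)^(n-m)
        return comb(n, m) * x**m * (1 - x)**(n - m)

    def t(ell, k):
        # partition points t_ell(k) = sin^2( pi*k / 2^(ell+1) )
        return np.sin(np.pi * k / 2**(ell + 1))**2

    def UE_bar(n, m, ell, k):
        # monotonic-assumption error bound of Corollary 12.2 on the k-th subinterval
        z0, z1 = t(ell, k), t(ell, k + 1)
        return (z1 - z0) * abs(b(n, m, z1) - b(n, m, z0))

    n, m, ell = 6, 2, 5
    bounds = [UE_bar(n, m, ell, k) for k in range(2**ell)]
    print("error bound summed over all subintervals:", sum(bounds))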
Fig. 6  Example visualization of the change of $\overline{UE}_{n,m}$ during refinement, for an arbitrary function.

Corollary 12.3. If $b_{n,m}(x)$ is monotonic over $[t_\ell(k), t_\ell(k+1)]$, then $UE_{n,m}(\ell, k)$ is equal to $\overline{UE}_{n,m}(\ell, k)$.

Lemma 13. Suppose a partition $P_{\ell-1}$ is refined to the partition $P_\ell$. Given a subinterval $[t_{\ell-1}(k), t_{\ell-1}(k+1)]$ of $P_{\ell-1}$, the monotonic-assumption upper bound of the error is reduced over that interval by a factor of at least $3/4$; that is,
$$\frac{3\,\overline{UE}_{n,m}(\ell-1, k)}{4} > \overline{UE}_{n,m}(\ell, 2k) + \overline{UE}_{n,m}(\ell, 2k+1).$$

Proof. Consider Figure 6. There are the widths
$$x = t_{\ell-1}(k+1) - t_{\ell-1}(k), \qquad y = \big|b_{n,m}(t_{\ell-1}(k+1)) - b_{n,m}(t_{\ell-1}(k))\big|,$$
where $x$ and $y$ represent the width and the change in height of the previous subinterval. There are also the widths
$$x_1 = t_\ell(2k+1) - t_\ell(2k), \qquad y_1 = \big|b_{n,m}(t_\ell(2k+1)) - b_{n,m}(t_\ell(2k))\big|,$$
where $x_1$ and $y_1$ represent the width and the change in height of the left part of the split subinterval. It follows that the width and the change in height of the right part of the split subinterval are
$$x_2 = x - x_1, \qquad y_2 = y - y_1.$$
We conclude that
$$\overline{UE}_{n,m}(\ell-1, k) = x\cdot y, \qquad \overline{UE}_{n,m}(\ell, 2k) = x_1\cdot y_1, \qquad \overline{UE}_{n,m}(\ell, 2k+1) = x_2\cdot y_2.$$
Finally, define $\bar{x} = x_1/x$ and $\bar{y} = y_1/y$. Note that
$$\bar{x} = \frac{x_1}{x} = \frac{t_\ell(2k+1) - t_\ell(2k)}{t_\ell(2k+2) - t_\ell(2k)} = \rho_{\ell-1}(k)$$
by Remark 3. Thus, by Theorem 10, $\frac14 < \bar{x} < \frac34$. Simultaneously, $0 \le \bar{y} \le 1$. We proceed by simplifying the expression
$$\frac{\overline{UE}_{n,m}(\ell, 2k) + \overline{UE}_{n,m}(\ell, 2k+1)}{\overline{UE}_{n,m}(\ell-1, k)} = \frac{x_1 y_1 + x_2 y_2}{xy}.$$
Plugging in the definitions of $x_2$ and $y_2$, this equals
$$\frac{x_1 y_1 + (x - x_1)(y - y_1)}{xy}.$$
Expanding the product and applying the definitions of $\bar{x}$ and $\bar{y}$ yields $2\bar{x}\bar{y} - \bar{y} - \bar{x} + 1$. Rearranging, this equals
$$2\Big(\bar{x} - \frac12\Big)\Big(\bar{y} - \frac12\Big) + \frac12.$$
Assume without loss of generality that $(\bar{y} - 1/2)$ is positive. Then, assigning to $\bar{x}$ and $\bar{y}$ their respective maximum values, the following inequality is obtained:
$$\frac{\overline{UE}_{n,m}(\ell, 2k) + \overline{UE}_{n,m}(\ell, 2k+1)}{\overline{UE}_{n,m}(\ell-1, k)} < 2\Big(\frac34 - \frac12\Big)\Big(1 - \frac12\Big) + \frac12 = \frac34.$$
A similar argument applies when $(\bar{y} - 1/2)$ is negative, and the bound is trivially correct when $(\bar{y} - 1/2)$ equals zero. □

Corollary 13.1. For all monotonic subintervals $[t_\ell(k), t_\ell(k+1)]$, the upper bound of the error $UE_{n,m}(\ell, k)$ is reduced by at least 25% when the partition is refined from $P_\ell$ to $P_{\ell+1}$.

Proof. This follows as a direct result of Lemma 13. □

Next, let us consider the error over the whole approximation.

Definition 12. We denote the sum of the upper bounds of the error over all subintervals by
$$SUE_{n,m}(\ell) = \sum_{k=0}^{2^\ell - 1} UE_{n,m}(\ell, k),$$
and the sum of the upper bounds of the error over all subintervals, with the monotonic assumption for all subintervals, by
$$\overline{SUE}_{n,m}(\ell) = \sum_{k=0}^{2^\ell - 1} \overline{UE}_{n,m}(\ell, k).$$

To discuss how the error evolves with respect to the granularity of our partition, having an upper bound for the initial error is critical.

Definition 13. We denote the initial error by
$$\sigma_{n,m} = SUE_{n,m}(0).$$

Remark 10. Using the first and second derivative tests, one can verify that $b_{n,m}(m/n)$ is the supremum of $b_{n,m}(x)$ for $x \in [0,1]$. One can also easily show that the infimum of $b_{n,m}(x)$ is 0. Thus,
$$\sigma_{n,m} = SUE_{n,m}(0) = UE_{n,m}(0,0) = b_{n,m}\Big(\frac{m}{n}\Big).$$

Lemma 14.
$$\overline{SUE}_{n,m}(\ell+1) \le \frac34\,\overline{SUE}_{n,m}(\ell).$$

Proof. By Definition 12,
$$\overline{SUE}_{n,m}(\ell+1) = \sum_{k=0}^{2^{\ell+1}-1} \overline{UE}_{n,m}(\ell+1, k).$$
Rearranging the terms of the sum yields
$$\overline{SUE}_{n,m}(\ell+1) = \sum_{k=0}^{2^\ell - 1} \Big(\overline{UE}_{n,m}(\ell+1, 2k) + \overline{UE}_{n,m}(\ell+1, 2k+1)\Big).$$
Hence, by Lemma 13,
$$\overline{SUE}_{n,m}(\ell+1) < \sum_{k=0}^{2^\ell - 1} \frac34\,\overline{UE}_{n,m}(\ell, k) = \frac34\,\overline{SUE}_{n,m}(\ell). \qquad\Box$$
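A quick numerical sanity check of the per-subinterval bound in Lemma 13 on the monotonic parent subintervals is sketched below. As before, the Bernstein-type form of $b_{n,m}$ and the $\sin^2$ partition are assumptions used only for this illustration; the single parent subinterval containing the maximum at $m/n$ is skipped, since the monotonicity used in the proof does not apply there.

    import numpy as np
    from math import comb

    b = lambda n, m, x: comb(n, m) * x**m * (1 - x)**(n - m)
    t = lambda ell, k: np.sin(np.pi * k / 2**(ell + 1))**2
    UE_bar = lambda n, m, ell, k: (t(ell, k + 1) - t(ell, k)) * abs(b(n, m, t(ell, k + 1)) - b(n, m, t(ell, k)))

    n, m, ell = 6, 2, 5
    for k in range(2**(ell - 1)):
        lo, hi = t(ell - 1, k), t(ell - 1, k + 1)
        if lo <= m / n <= hi:
            continue  # skip the parent subinterval containing the local maximum
        lhs = UE_bar(n, m, ell, 2 * k) + UE_bar(n, m, ell, 2 * k + 1)
        rhs = 0.75 * UE_bar(n, m, ell - 1, k)
        assert lhs < rhs   # Lemma 13
    print("Lemma 13 bound holds on all monotonic subintervals at ell =", ell)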
diff --git a/SNE1T4oBgHgl3EQfHwN5/content/tmp_files/2301.02930v1.pdf.txt b/SNE1T4oBgHgl3EQfHwN5/content/tmp_files/2301.02930v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ce98b01b4eaeab032bcae7b30ddec54dfc8ac5e9
--- /dev/null
+++ b/SNE1T4oBgHgl3EQfHwN5/content/tmp_files/2301.02930v1.pdf.txt
@@ -0,0 +1,4059 @@
Prepared for submission to JHEP

Complex critical points in Lorentzian spinfoam quantum gravity: 4-simplex amplitude and effective dynamics on double-∆3 complex

Muxin Han^{1,2}, Hongguang Liu^{2}, Dongxue Qu^{3,1}

^1 Department of Physics, Florida Atlantic University, 777 Glades Road, Boca Raton, FL 33431-0991, USA
^2 Department Physik, Institut für Quantengravitation, Theoretische Physik III, Friedrich-Alexander-Universität Erlangen-Nürnberg, Staudtstr. 7/B2, 91058 Erlangen, Germany
^3 Perimeter Institute for Theoretical Physics, 31 Caroline St N, Waterloo, ON N2L 2Y5, Canada

E-mail: hanm(AT)fau.edu, hongguang.liu(AT)gravity.fau.de, dqu(AT)perimeterinstitute.ca

Abstract: The complex critical points are analyzed in the 4-dimensional Lorentzian Engle-Pereira-Rovelli-Livine (EPRL) spinfoam model in the large-j regime. For the 4-simplex amplitude, taking into account the complex critical point generalizes the large-j asymptotics to the situation with non-Regge boundary data and relates to the twisted geometry. For generic simplicial complexes, we present a general procedure to derive the effective theory of Regge geometries from the spinfoam amplitude in the large-j regime by using the complex critical points. The effective theory is analyzed in detail for the spinfoam amplitude on the double-∆3 simplicial complex. We numerically compute the effective action and the solution of the effective equation of motion on the double-∆3 complex. The effective theory reproduces the classical Regge gravity when the Barbero-Immirzi parameter γ is small.

arXiv:2301.02930v1 [gr-qc] 7 Jan 2023

Contents
1 Introduction
2 Spinfoam amplitude
3 Complex critical point and effective dynamics
4 Four-simplex amplitude
  4.1 The amplitude and parametrization of variables
  4.2 Deviating from the shape-matching
5 Revisit the ∆3 amplitude
6 Double-∆3 amplitude and effective action
  6.1 Some setups
  6.2 Numerical computing the effective action
  6.3 Comparing to Regge action
7 Solutions of effective dynamics on double-∆3
  7.1 Spinfoam complex critical point and the Regge solution $\delta L_c^{\rm Regge}$
  7.2 Complex critical point and the other Regge solution $\delta \tilde L_c^{\rm Regge}$
8 Conclusion and Outlook
A Boundary data for single 4-simplex
B The Newton-Raphson method
C Boundary data for the $\Delta_3^2$ complex
  C.1 Boundary data and the real critical point for the flat $\Delta_3^2$ complex
  C.2 Boundary data and the pseudo critical points for the curved $\Delta_3^2$ complex
D Regge Action

1 Introduction

The perturbative expansion is widely used in quantum theory to make approximate predictions order by order in a certain parameter. The method of perturbative expansion is well-connected to the path integral formulation, whose stationary phase approximation results in the semiclassical expansion in ℏ. By the stationary phase approximation, the path integral is approximately computed by the dominant contribution from the critical point and its neighborhood. The critical point is the solution of the equation of motion, which is obtained by varying the action in the path integral. Given a path integral in terms of real variables, traditionally, the semiclassical expansion only takes into account critical points inside the real integration cycle. However, recent progress in many research areas demonstrates that the complex critical point, generically away from the real integration cycle, plays a crucial role in the semiclassical expansion of the path integral (see e.g. [1–6]). The complex critical point is the critical point of the analytically continued path integral, where the integrand is analytically extended to the complexification of the real integration cycle.
The importance of the complex +critical point has been demonstrated in the recent progress in the semiclassical analysis of spinfoam +amplitude [12–14]. A key result is that the semiclassical curved spacetime geometry can only emerge +from the complex critical point of the spinfoam amplitude. Taking into account the complex critical +point provides the resolution to the long-standing “flatness problem”, i.e., the problem of discovering +only the flat spacetime geometry in the spinfoam amplitude. This problem turns out to be the +confusion from ignoring the complex critical point. +The present work continues from the earlier work [12] and further study the complex critical +points and their implications in spinfoam amplitude. +The discussion in this work focuses on +the 4-dimensional Lorentzian Engle-Pereira-Rovelli-Livine (EPRL) spinfoam model. Our results +demonstrate the impact of the complex critical points mainly from two perspectives: +• At the level of one 4-simplex amplitude, taking into account the complex critical point +generalizes the large-j asymptotics by Barrett et al [8] to the case of non-Regge boundary data. +The geometry of the non-Regge boundary data gives the boundary tetrahedra that are glued +only with area-matching but without shape-matching, in contrast to the Regge boundary data +that requires the shape-matching condition (as well as the orientation matching condition) and +determines the Regge boundary geometry. The generalized 4-simplex amplitude asymptotic +behavior depends analytically on the boundary data. This analytic dependence is not manifest +in the original asymptotic formula in [8]. The computation of the generalized asymptotic +behavior relies on the numerical method. The discussion in Section 4 provides the general +algorithm of computing the complex critical point of the amplitude, and demonstrates the +numerical results of the asymptotics for a 1-parameter family of non-Regge boundary data. +• Based on the application of complex critical points, we develop a formalism to derive the +effective theory of Regge geometry from the large-j spinfoam amplitude. As the result, given +a simplicial complex K with M internal segments, the spinfoam amplitude A(K) with Regge +boundary data reduces to the integral over the internal line-segment lengths lI, I = 1, · · · , M, +A(K) ∼ +� +M +� +I=1 +dµ(lI) eλS(⃗l) [1 + O(1/λ)] , +λ ≫ 1, +(1.1) +within the neighborhood of the integration domain of A(K). λ is the scaling parameter of +spins jf. eλS(⃗l) with the effective action S(⃗l) comes from evaluating the analytically continued +integrand of A(K) at the complex critical point, which depend analytically on lI. The integral +in (1.1) reduced from A(K) is over the Regge geometries with the fixed boundary condition. +The equation of motion ∂lIS(⃗l) = 0 gives the effective dynamics of Regge geometry implied +by the spinfoam amplitude. The formalism of deriving the effective theory is discussed in +Section 3. In Sections 6 and 7, we apply the formalism to the double-∆3 simplicial complex, +which contains only a single internal segment, i.e., M = 1. The complex critical points and the +effective action S(⃗l) are computed numerically following the general algorithm. The spinfoam +amplitude depends on the Barbero-Immirzi parameter γ. The computations are performed +for many different values of the Barbero-Immirzi parameter γ, ranging from small to large. +The resulting S(⃗l) are compared with the Regge action on the double-∆3 complex. 
S(⃗l) is +well-approximated by the classical Regge action in the small-γ regime, and S(⃗l) provides the +correction to the Regge action with increasing γ. The solutions of the effective dynamics +are computed numerically for different values of γ and compared to the solution of Regge +– 2 – + +equation. The solution from S(⃗l) well-approximates the Regge solution for small γ and gives +larger correction when increasing γ. Recovering the classical Regge action and solution from +the effective dynamics of spinfoam amplitude gives evidence of the semiclassical consistency of +spinfoam quantum gravity. +Recovering the classical Regge gravity from the spinfoam amplitude with small γ has been +argued earlier in [15–21]. Our numerical result confirms this property for the spinfoam amplitude +on the double-∆3 complex. +The numerical computations are performed for different γ’s ranging from small to large. Fixing +the boundary data, the solutions of the effective dynamics give a trajectory in the space of Regge +geometries parametrized by γ. The trajectory approaches the solution of the classical Regge equation +for small γ as mentioned above. For large γ, the trajectory stablizes at the Regge geometry that is +different from the classical Regge solution. It suggests that the effective theory for large γ differs +significantly from the Regge gravity. The solutions both at small and large γ give non-suppressed +contributions to the spinfoam amplitude. In particular, the solutions for large γ violate the known +bound |γδh| ≲ λ−1/2 [11–13] (δh is the deficit angle of the Regge geometry), which is valid for +non-suppressed contributions to the amplitude with finite and small γ. +Studying the complex critical points in the spinfoam amplitude closely relates to the recent +progress in numerical studies of spinfoam amplitudes [22]. Given the complexity of the spinfoam +amplitude, the complex critical point and the corresponding contribution to the spinfoam amplitude +has to be computed numerically. The numerical analysis of complex critical points connects to +the Lefschetz-thimble and Monte-Carlo computation for the spinfoam integral [23], because every +complex critical point associates to an integration cycle known as Lefschetz thimble, and the integral +on the Lefschetz thimble collects all contributions associated to the complex critical point. Another +related numerical result is the semiclassical expansion of the spinfoam amplitude to the next-to- +leading order from the stationary phase approximation [24]. We also would like to mention a few +other numerical approaches for spinfoam quantum gravity, including the “sl2cfoam-next” code for +the non-perturbative computation of the spinfoam amplitude [25–27], the effective spinfoam model +[13, 28], the hybrid algorithm [29], and the spinfoam renormalization [30, 31], etc. +This paper is organized as follows: Section 2 gives a brief review of the integral representation of +the EPRL spinfoam amplitude and the definition of the large-j regime. In Section 3, we define the +real and complex critical points and discuss the general formalism of deriving the effective dynamics +of Regge geometry. Section 4 studies the complex critical point of the 4-simplex amplitude and +generalizes the large-j asymptotics to include the non-Regge boundary data. Section 5 revisits +the known results on the spinfoam amplitude on ∆3 complex as the preparation for analyzing the +amplitude on the double-∆3 complex. 
Section 6 discusses the complex critical point in the spinfoam amplitude on the double-∆3 complex and computes the effective action. Section 7 discusses the numerical solution of the effective dynamics on the double-∆3 complex. In Section 8, we conclude and discuss some outlooks.

2 Spinfoam amplitude

A 4-dimensional simplicial complex K contains 4-simplices v, tetrahedra e, triangles f, line segments, and points. The internal and boundary triangles are denoted by h and b, respectively (f is either h or b). The SU(2) spins $j_h, j_b \in \mathbb{N}_0/2$ are assigned to the internal and boundary triangles h, b. The spins label the quanta of triangle areas. The LQG area spectrum indicates that the quantum area of the triangle f is given by $a_f = 8\pi\gamma G\hbar\sqrt{j_f(j_f+1)}$ [32, 33]. In the large-j regime, which we will focus on, the area spectrum gives $a_f \simeq 8\pi\gamma G\hbar\, j_f$, or $a_f \simeq \gamma j_f$ when we set units such that $8\pi G\hbar = 1$.

The Lorentzian EPRL spinfoam amplitude on K is given by summing over the internal spins $\{j_h\}$:
$$A(\mathcal{K}) = \sum_{\{j_h\}} \prod_h d_{j_h} \int [\mathrm{d}g\,\mathrm{d}z]\; e^{S(j_h, g_{ve}, z_{vf};\, j_b, \xi_{eb})}, \qquad (2.1)$$
$$[\mathrm{d}g\,\mathrm{d}z] = \prod_{(v,e)} \mathrm{d}g_{ve} \prod_{(v,f)} \mathrm{d}\Omega_{z_{vf}}, \qquad (2.2)$$
where $d_{j_h} = 2j_h + 1$. The boundary states are SU(2) coherent states $|j_b, \xi_{eb}\rangle$, where $\xi_{eb} = u_{eb} \triangleright (1,0)^T$ with $u_{eb} \in \mathrm{SU}(2)$. $j_b$ and $\xi_{eb}$ are determined by the area and the 3-normal of the boundary triangle b. The summed/integrated variables are $g_{ve} \in \mathrm{SL}(2,\mathbb{C})$, $z_{vf} \in \mathbb{CP}^1$, and $j_h$. $\mathrm{d}g_{ve}$ is the Haar measure on SL(2,ℂ),
$$\mathrm{d}g = \frac{\mathrm{d}\beta\,\mathrm{d}\beta^*\,\mathrm{d}\gamma\,\mathrm{d}\gamma^*\,\mathrm{d}\delta\,\mathrm{d}\delta^*}{|\delta|^2}, \qquad \forall\; g = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \in \mathrm{SL}(2,\mathbb{C}), \qquad (2.3)$$
and $\mathrm{d}\Omega_{z_{vf}}$ is the scaling-invariant measure on $\mathbb{CP}^1$:
$$\mathrm{d}\Omega_{z_{vf}} = \frac{i}{2}\,\frac{(z_0\,\mathrm{d}z_1 - z_1\,\mathrm{d}z_0)\wedge(\bar z_0\,\mathrm{d}\bar z_1 - \bar z_1\,\mathrm{d}\bar z_0)}{\langle Z_{vef}, Z_{vef}\rangle\,\langle Z_{ve'f}, Z_{ve'f}\rangle}, \qquad \forall\; z_{vf} = (z_0, z_1)^T, \qquad (2.4)$$
where $Z_{vef} = g_{ve}^\dagger z_{vf}$, $\langle\cdot,\cdot\rangle$ is the Hermitian inner product on $\mathbb{C}^2$, and $z_{vf}$ is a 2-component spinor for the face f.

The spinfoam action S in Eq.(2.1) is complex and linear in $j_h$, $j_b$, with an expression of the form [34]
$$S = \sum_{(e',h)} j_h\, F_{(e',h)} + \sum_{(e,b)} j_b\, F^{\rm in/out}_{(e,b)} + \sum_{(e',b)} j_b\, F^{\rm in/out}_{(e',b)}, \qquad (2.5)$$
$$F^{\rm out}_{(e,b)} = 2\ln\frac{\langle Z_{veb}, \xi_{eb}\rangle}{\|Z_{veb}\|} + i\gamma \ln\|Z_{veb}\|^2, \qquad (2.6)$$
$$F^{\rm in}_{(e,b)} = 2\ln\frac{\langle \xi_{eb}, Z_{v'eb}\rangle}{\|Z_{v'eb}\|} - i\gamma \ln\|Z_{v'eb}\|^2, \qquad (2.7)$$
$$F_{(e',f)} = 2\ln\frac{\langle Z_{ve'f}, Z_{v'e'f}\rangle}{\|Z_{ve'f}\|\,\|Z_{v'e'f}\|} + i\gamma \ln\frac{\|Z_{ve'f}\|^2}{\|Z_{v'e'f}\|^2}. \qquad (2.8)$$
Here, e and e' are boundary and internal tetrahedra, respectively. In the dual complex K*, the orientation of ∂f* is outgoing from the vertex dual to v and incoming to another vertex dual to v', and the orientation of the face f* dual to f induces the orientation of ∂f*. As for the logarithms in the spinfoam action, we fix all logarithms to their principal values. The derivation of the spinfoam action S is given in [34].

The spinfoam amplitude in the formulation (2.1) has the following three types of continuous gauge degrees of freedom, so some gauge fixings are needed to remove the redundant degrees of freedom:

• Firstly, there is an SL(2,ℂ) gauge transformation at each v:
$$g_{ve} \mapsto x_v^{-1} g_{ve}, \qquad z_{vf} \mapsto x_v^\dagger z_{vf}, \qquad x_v \in \mathrm{SL}(2,\mathbb{C}). \qquad (2.9)$$
To remove this gauge degree of freedom, we fix one $g_{ve}$ to be a constant SL(2,ℂ) matrix for each 4-simplex. The amplitude is independent of the choice of constant matrices.

• Secondly, there is an SU(2) gauge transformation on each internal e:
$$g_{v'e} \mapsto g_{v'e}\, h_e^{-1}, \qquad g_{ve} \mapsto g_{ve}\, h_e^{-1}, \qquad h_e \in \mathrm{SU}(2). \qquad (2.10)$$
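As a concrete illustration of the building blocks in Eqs. (2.6)–(2.8), the sketch below evaluates one face term $F^{\rm out}$ numerically with numpy. It is a minimal, self-contained example with randomly chosen placeholders for $g_{ve}$, $z_{vf}$, and $\xi_{eb}$ (these are not data from the paper), only meant to show how $Z_{vef} = g_{ve}^\dagger z_{vf}$ and the two logarithms combine; the principal branch of the logarithm is used, as prescribed above.

    import numpy as np

    def random_sl2c(rng):
        # a generic SL(2,C) matrix: random complex 2x2 rescaled to unit determinant
        g = rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2))
        return g / np.sqrt(np.linalg.det(g))

    def F_out(g_ve, z_vf, xi_eb, gamma):
        # F^out_(e,b) = 2 ln( <Z, xi> / ||Z|| ) + i*gamma*ln ||Z||^2, with Z = g^dagger z
        Z = g_ve.conj().T @ z_vf
        norm2 = np.vdot(Z, Z).real           # ||Z||^2
        inner = np.vdot(Z, xi_eb)            # Hermitian inner product <Z, xi>
        return 2 * np.log(inner / np.sqrt(norm2)) + 1j * gamma * np.log(norm2)

    rng = np.random.default_rng(0)
    g = random_sl2c(rng)
    z = rng.normal(size=2) + 1j * rng.normal(size=2)
    u = np.linalg.qr(rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2)))[0]  # unitary (SU(2) up to phase)
    xi = u @ np.array([1.0, 0.0])            # xi = u acting on (1,0)^T, as below Eq.(2.1)
    print(F_out(g, z, xi, gamma=0.1))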
+(2.10) +To fix this SU(2) gauge freedom, one can parameterize one of two SL(2, C) elements: gve, or +gv′e by the upper triangular matrix +k = +�λ−1 µ +0 +λ +� +, λ ∈ R \ {0}, µ ∈ C +(2.11) +Here, we use the fact that any g ∈ SL(2, C) can be decomposed as g = kh with h ∈ SU(2) and +k an upper triangular matrix in Eq.(2.11). +• Thirdly, for each zvf, there is the scaling gauge freedom: +zvf �→ λvfzvf, +λvf ∈ C. +(2.12) +Here, we fix the gauge by setting the first component of zvf to 1, i.e. zvf = (1, αvf)T, where +αvf ∈ C. +Furthermore, in Eq.(2.1), we assume the summation over internal jh ∈ N0/2 is bounded by jmax. +In some situations, jmax is determined by boundary spins jb via the triangle inequality, otherwise +jmax are imposed as the cut-off to regularize the infinite sum over spins. To prepare for the stationary +phase analysis, we would like to change the summation over jh in Eq.(2.1) to integrals. The idea is +to apply the Poisson summation formula. Firstly, we replace each djh by a smooth compact support +function τ[−ϵ,jmax+ϵ](jh) satisfying +τ[−ϵ,jmax+ϵ](jh) = djh, for jh ∈ [0, jmax], +and +τ[−ϵ,jmax+ϵ](jh) = 0, for jh ̸∈ [−ϵ, jmax + ϵ], +for any 0 < ϵ < 1/2. This replacement does not change the value of the amplitude A(K) but makes +the summand of � +jh smooth and compact support in jh. Then, by applying the Poisson summation +formula, +� +n∈Z +f(n) = +� +k∈Z +� +R +dnf(n) e2πikn, +the discrete summation over jh in Eq.(2.1) becomes summing of integrals: +A(K) = +� +{kh∈Z} +� � +h +djh +� +h +2τ[−ϵ,jmax+ϵ](jh) +� +[dgdz] eS(k), +(2.13) +S(k) = S + 4πi +� +h +jhkh. +(2.14) +By the area spectrum, the classical area af and small ℏ imply the large spin jf ≫ 1. This motivates +understanding the large-j regime as the semiclassical regime of A(K). Then, to probe the semiclassical +regime, we scale uniformly both the boundary spins jb and the internal spin cut-off jmax by +jb → λjb, +jmax → λjmax, +λ ≫ 1, +(2.15) +so S → λS as a result from S being linear in jb, jh. As a consequence, the spinfoam amlitude A(K) +– 5 – + +in the large-j regime is +A(K) = +� +{kh∈Z} +� +R +� +h +djh +� +h +2λ τ[−ϵ,λjmax+ϵ](λjh) +� +[dgdz] eλS(k), +(2.16) +S(k) = S + 4πi +� +h +jhkh, +(2.17) +by the change of integration variables jh → λjh, and jh is continous. +3 +Complex critical point and effective dynamics +The integral in (2.16) at each kh can be analyzed with the stationary phase method in the regime +λ ≫ 1. By the standard argument of the stationary phase approximation, by fixing the boundary +data, the integral with λ ≫ 1 is approximated by the dominant contributions from the solutions of +critical equations and neighborhood. In the case of the integrals in (2.16), the critical equations are +Re(S) = ∂gveS = ∂zvf S = 0, +(3.1) +∂jhS = 4πikh, +kh ∈ Z. +(3.2) +The solutions inside the integration domain are denoted by {˚jh,˚gve,˚zvf}. The integration domain is +viewed as a real manifold, and the integration variables are real and imaginary parts of the matrix +elements in gve and zvf. We call {˚jh,˚gve,˚zvf} the real critical point accordingly. +The existence of the real critical point in (2.16) depends on the boundary condition. The real +critical point may not exist for the generic boundary condition. We know that S is a complex +action with n real variables x, and ∂xS = 0 gives n complex thus 2n real equations, which is +over-constrained for n real variables. 
Consequently, the critical equations (3.1) and (3.2) coupled +with one more equation Re(S) = 0 result in the nonexistence of the general real solution, unless for +some special boundary conditions. +As a solution to this problem of over-constrained equations, the integration variables have to +be complexified, and action S has to be analytically continued to the complex variables z. We are +only interested in the integration domain where the spinfoam action S is analytic. The analytically +continued action is denoted by S. On the space of complex variables, the complex critical equation +∂zS = 0 is not over-constrained anymore because it gives n complex equations for n complex +variables. Re(S) = 0 is dropped when we study S instead of S. In the space of complex variables, +the solutions of ∂zS = 0 are called the complex critical points, which play the dominant role for the +asymptotics of A(K) in the case that the real critical point is absent. +Before discussing the complex critical point, let us firstly review some known results from the +critical equations (3.1) and (3.2) with the boundary data corresponding to Regge geometry on +∂K. The real solutions of the part (3.1) have been well-studied in the literature [7–9, 34]. We call +these solutions the pseudo-critical points. As one of the results, the pseudo-critical point satisfying +a nondegeneracy condition endows a Regge geometry on K with certain 4-simplex orientations. +When focusing on the pseudo-critical points endowing the uniform orientations to all 4-simplices, +further imposing (3.2) to them gives the accidental flatness constraint to their corresponding Regge +geometries, i.e., every deficit angle δh hinged by the internal triangle h [11, 35] satisfies: +γδh = 4πkh, +kh ∈ Z. +(3.3) +When kh = 0, δh at every internal triangle is zero, and the Regge geometry endowed by the real +critical point is flat. Eq.(3.3) is a strong constraint to the allowed geometry from the spinfoams and +– 6 – + +can be satisfied only for special boundary conditions that admit the flat bulk geometry (mod 4πZ). +The accidental flatness constraint is consistent with the above argument about over-constrained +equations, and it has been demonstrated explicitly in the example well-studied in, e.g., [12, 36]. If +one only considers the real critical point for the dominant contribution to A(K), Eq.(3.3) would +imply that only the flat geometry (mod 4πZ) exists. This confusion leading to the flatness problem +results from ignoring the complex critical point in the stationary phase analysis. +In the following discussion, we show that the large-λ spinfoam amplitude does receive dominant +contributions from the complex critical points away from the real integration domain. The complex +critical points precisely correspond to the curved Regge geometries emergent from the spinfoam +amplitude. Interestingly, the application of complex critical points leads to a derivation of effective +dynamics of Regge geometry from the spinfoam amplitude. The emergent curved Regge geometries +are constrained by the effective dynamics. We firstly provide a general formalism below, then we +apply the formalism to the concrete models with several different K in the following sections. +Motivated by relating to the dynamics of Regge geometry, we separate the integral in the +amplitude (2.16) into two parts. Suppose K has M internal segments, the dynamics of Regge +geometry should relate to the dynamics of these internal segment-lengths. 
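As a minimal illustration of this counting (our toy example, not taken from the analysis of the spinfoam action itself), consider a single real variable x with a quadratic action depending on one real parameter r:
\begin{equation}
S(r,x) = -\tfrac{1}{2}x^{2} + i r x ,\qquad
\partial_x S = -x + i r = 0 \;\Rightarrow\; x = i r ,\qquad
\mathrm{Re}(S) = -\tfrac{1}{2}x^{2} = 0 \;\Rightarrow\; x = 0 .
\end{equation}
For r ≠ 0 the two real conditions are incompatible, so no real critical point exists; only the special value r = 0 admits one. Complexifying x → z removes the obstruction: ∂_z S = 0 has the solution Z(r) = i r for every r, anticipating the construction below.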
Motivated by this, we +separate M internal areas jho (ho = 1, · · · , M) from other j¯h (¯h = 1, · · · , F − M), where jho relates +to the segment-lengths. Here, F is the total number of internal triangles in K, and M equals the +number of the separated internal segments. The spinfoam amplitude (2.16) then becomes +A(K) = +� +{kh} +� +M +� +ho=1 +djhoZ{kh} +K +(jho) , +(3.4) +where Z{kh} +K +, called the partial amplitude, is given by +Z{kh} +K +(jho) = +� � +¯h +dj¯h +� +h +(2λdλjh) +� +[dgdz]eλS(k). +(3.5) +We can then change variables from the areas jho to the internal segment-lengths {lI}M +I=1, with I +denoting the internal segment. The internal triangles ho = 1, · · · , M are suitably chosen such that +the change of variables is well-defined in the interested region, e.g. a neighborhood of {˚jho} of +{˚jh,˚gve,˚zvf} corresponding to the flat geometry. Indeed, the chosen M areas {jho} are related +to M segment-lengths {lI} by Heron’s formula. Inverting the relation between {jho}M +ho=1 and +{lI}M +I=1 defines the local change of variables (jho, j¯h) → (lI, j¯h) in a neighborhood K of a given +Regge geometry in the integration domain of (2.16). This procedure is just changing variables +without imposing any restrictions. When focusing on the integrals in the neighborhood K, we have +dM+Njh = JldMlI dF −Mj¯h, where Jl = det(∂jho/∂lI) is the jacobian obtained by the derivatives +of Heron’s formula. Therefore, the contribution to A(K) from the neighborhood K is expressed as +� +{kh} +� +M +� +I=1 +dlIJlZ{kh} +K +(lI) , +(3.6) +The partial amplitude Z{kh} +K +has the external parameters r ≡ {lI, jb, ξeb} including not only the +boundary data jb, ξeb but also internal segment-lengths lI. The above decomposition of jh-integrals +closely relates to the earlier proposal [37, 38] (see also [39] in the context of area Regge calculus). lI +parametrizes a submanifold MRegge in the space of jh. The submanifold MRegge collects jh’s that +can be interpreted as areas determined by the segment lengths lI (by Heron’s formula). Generically +the space of jh is much larger than the space of segment lengths [40]. j¯h parametrizes the direction +– 7 – + +transverse to MRegge. +To study the partial amplitude Z{kh} +K +, we apply the theory of stationary phase approximation +for complex action with parameters [41, 42]. In the following, we only consider the partial amplitude +with kh = 0, while the situation with other kh can be studied analogously. We consider the large-λ +integral +� +K eλS(r,x)dNx, and regard r as the external parameters. S(r, x) is an analytic function of +r ∈ U ⊂ Rk, x ∈ K ⊂ RN. U × K is a neighborhood of (˚r,˚x), where ˚x is a real critical point of +S(˚r, x). S(r, z) with z = x + iy ∈ CN is the analytic extension of S(r, x) to a complex neighborhood +of ˚x. The complex critical equation is +∂zS = 0, +(3.7) +whose solution is z = Z(r). Here, Z(r) is an analytic function of r in the neighborhood U. When +r = ˚r, Z(˚r) = ˚x reduces to the real critical point. When r deviates away from ˚r, Z(r) ∈ CN can +move away from the real plane RN, thus is called the complex critical point (see Figure. 1). With +Figure 1. The real and complex critical points ˚x and Z(r). S(r, z) is analytic extended from the real axis +to the complex neighborhood illustrated by the red disk. 
this in mind, we have the following large-λ asymptotic expansion for the integral:
\begin{equation}
\int_K e^{\lambda S(r,x)}\,\mathrm{d}^N x
=\Big(\frac{1}{\lambda}\Big)^{N/2}
\frac{e^{\lambda S(r,Z(r))}}{\sqrt{\det\big(-\partial^2_{z,z}S(r,Z(r))/2\pi\big)}}
\,\big[1+O(1/\lambda)\big],
\tag{3.8}
\end{equation}
where S(r, Z(r)) and ∂²_{z,z}S(r, Z(r)) are the action and the Hessian evaluated at the complex critical point. In addition, the real part of S is zero or negative. More precisely, there exists a constant C > 0 such that
\begin{equation}
\mathrm{Re}(S) \le -C\,|\mathrm{Im}(Z)|^{2}.
\tag{3.9}
\end{equation}
See [41, 42] for the proof of this inequality. It implies that Re(S) = 0, which produces the oscillatory phase in (3.8), can only occur at the real critical point, where Im(Z) = 0 and r = ˚r. When r deviates from ˚r by a finite distance, so that Im(Z) is finite and Re(S) is negative, (3.8) is exponentially suppressed as λ is scaled large. The asymptotic formula (3.8) depends analytically on r and interpolates smoothly between two different behaviors in the parameter space of r:
• If the critical point is not real, then Re(S) < 0, which gives an exponentially decaying amplitude.
• If the critical point is real, then Re(S) = 0, and e^{λS} gives an oscillatory phase.
These two distinct behaviors are obtained by fixing r and scaling λ. But since the asymptotic formula (3.8) depends on r analytically, we can vary r while scaling λ. Then we can arrive at the regime where the asymptotics (3.8) is not suppressed at the complex critical point. Indeed, for any large λ, there always exists r ≠ ˚r sufficiently close to ˚r such that Im(Z) and Re(S) are small enough, and e^{λS} in (3.8) is then not suppressed at the complex critical point.
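As a numerical sanity check of (3.8) (again on our own quadratic toy model rather than on the spinfoam integral), the snippet below compares the brute-force integral of e^{λS(r,x)} with the right-hand side of (3.8) for S(r,x) = −x²/2 + irx, where Z(r) = ir, S(r,Z(r)) = −r²/2 and the Hessian is −1; for a Gaussian action the formula is exact, and the suppression e^{−λr²/2} away from r = 0 is manifest.

```python
import numpy as np
from scipy.integrate import quad

# Quadratic toy action S(r, x) = -x^2/2 + i r x (illustration only, not the spinfoam action).
# Its complex critical point is Z(r) = i r, with S(r, Z(r)) = -r^2/2, consistent with (3.9) for C = 1/2.
def S(r, x):
    return -0.5 * x**2 + 1j * r * x

lam, r = 50.0, 0.3
Z = 1j * r                 # complex critical point, solves dS/dx = 0
S_at_Z = S(r, Z)           # = -r^2/2
hess = -1.0                # second derivative d^2 S / dx^2

# Left-hand side of (3.8): direct integral over the real axis (N = 1)
f = lambda x: np.exp(lam * S(r, x))
lhs = quad(lambda x: f(x).real, -10, 10)[0] + 1j * quad(lambda x: f(x).imag, -10, 10)[0]

# Right-hand side of (3.8), leading order
rhs = (1.0 / lam) ** 0.5 * np.exp(lam * S_at_Z) / np.sqrt(-hess / (2.0 * np.pi))

print(lhs, rhs)  # both ~ sqrt(2*pi/lam) * exp(-lam * r^2 / 2) ~ 0.037
```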
We compute numerically the complex +critical points and S, confirming the contribution of the complex critical points to the spinfoam +amplitude. In particular, the double-∆3 model corresponding to M = 1 exhibits the non-trivial +effective dynamics of the Regge geometries. The effective dynamics approximates the classical Regge +calculus in the small-γ regime. +4 +Four-simplex amplitude +This section applies the above general procedure to the simplest situation: the 4-simplex amplitude. +In this case, there is no internal triangle: F = M = 0. The external parameter r only contains the +boundary data r = (jb, ξeb). The 4-simplex and its dual diagram are illustrated in Figure 2 (a) and +(b). The points of the 4-simplex v are labelled by (1, 2, 3, 4, 5). The five tetrahedra on the boundary +are labelled by +{e1, e2, e3, e4, e5} = {(1, 2, 3, 4), (1, 2, 3, 5), (1, 2, 4, 5), (1, 3, 4, 5), (2, 3, 4, 5)}. +These tetrahedra carry group variable gve ∈ SL(2, C). The triangle is shared by the tetrahedra and +carries an SU(2) spin jf, e.g., the tetrahedron e1 = (1, 2, 3, 4) and the tetrahedron e2 = (1, 2, 3, 5) +share the face f1 = (1, 2, 3). +2The shared faces are labelled by {f1, f2, ..., f10} = {(1, 2, 3), (1, 2, 4), (1, 2, 5), (1, 3, 4), (1, 3, 5), (2, 3, 4), (2, 3, 5), (3, 4, 5)}. +For convenience, in this section, the notations e and f mean that e ∈ {e1, ..., e5} and f ∈ {f1, ..., f10}. +– 9 – + +Figure 2. Panel (a) plots the 4-simplex v = (1, 2, 3, 4, 5). The boundary comprises five tetrahedra ei sharing +ten faces fi +2. Panel (b) is the dual complex of the 4-simplex. Five boxes correspond to boundary tetrahedra +carrying gve ∈ SL(2, C). The strands correspond to triangles carrying spins jf. The circles as endpoints of +strands carry boundary states ξef. The arrows represent the orientations of strands. +4.1 +The amplitude and parametrization of variables +According to (2.1), the EPRL 4-simplex amplitude with the boundary state has the following +expression [7–9, 43–45]: +Av (jf, ξef) = +� � +e +dgve δiσ3 (gve1) +� +(CP1)10 eS � +f +djf +π dΩzvf . +(4.1) +Here, all triangles are on the boundary, jf = jb. To fix the SL(2, C) gauge, gve1 is fixed to be +constant matrix diag(i, −i) (the timelike normal of the reference tetrahedron e1 is past-pointing). +The integrand in (4.1) is written as an exponential eS with the action +S = +� +f +2jf ln ⟨ξef, Zvef⟩ ⟨Zve′f, ξe′f⟩ +∥Zvef∥ ∥Zve′f∥ ++ iγjf ln ⟨Zve′f, Zve′f⟩ +⟨Zvef, Zvef⟩ . +(4.2) +The orientations of dual faces follow from Figure 2(c). To study the large-j behavior of the amplitude, +we scale all boundary spins jf → λjf by the parameter λ ≫ 1. The scaling of spins results in +the scaling of action S �→ λS, such that the integral (4.1) can be studied by the stationary phase +approximation. In the following, we firstly compute the real critical point {˚gve,˚zvf}, which is the +solution of the critical equation (3.1) and then describe the algorithm to compute the complex +critical point in the neighborhood. +To obtain the real critical point, we adopt the 4-simplex geometry used in [23, 24, 46] to generate +the boundary state. 
The coordinates of the five vertices Pa in Figure 2(a) in the Minkowski spacetime +are set as +P1 = (0, 0, 0, 0), P2 = +� +0, 0, 0, −2 +√ +5/31/4� +, P3 = +� +0, 0, −31/4√ +5, −31/4√ +5 +� +P4 = +� +0, −2 +√ +10/33/4, − +√ +5/33/4, − +√ +5/31/4� +P5 = +� +−3−1/410−1/2, − +� +5/2/33/4, − +√ +5/33/4, − +√ +5/31/4� +(4.3) +– 10 – + +1234 +1234 +j123 +J124 +J235 +1235 +1345 +J135 +/245 +/134 +J125 +J145 +1245Then, the 4-d normals of the tetrahedra are +Ne1 = (−1, 0, 0, 0), Ne2 = +� +5 +√ +22, +� +3 +22, 0, 0 +� +, Ne3 = +� 5 +√ +22, − 1 +√ +66, +2 +√ +33, 0 +� +Ne4 = +� 5 +√ +22, − 1 +√ +66, − 1 +√ +33, +1 +√ +11 +� +, Ne5 = +� 5 +√ +22, − 1 +√ +66, − 1 +√ +33, − 1 +√ +11 +� +. +(4.4) +The spinor ξef relates to the 3d normals nef by nef = ⟨ξef,⃗σξef⟩ (⃗σ are Pauli matrices). The Regge +boundary data of ten areas ˚jf, 3d normals ˚nef and the corresponding spinors ˚ξef of the 4-simplex +are listed in Appendix A. +With the Lorentzian Regge boundary data ˚r = (˚jf,˚ξef), we solve for the real critical point +(˚gve,˚zvf) which satisfies Re(S) = ∂gveS = ∂zvf S = 0. The results in the literature [8, 9] show that +there are exactly 2 real critical points, which have the interpretations as the geometrical 4-simplex +with opposite 4-orientations. The 4-simplex geometrical interpretation of the critical points results +in the same geometry as the one given by (4.3). We compute the real critical point following the +strategy described in [12, 14, 46], where the boundary data and critical points for a single 4-simplex +are studied in detail. The data of the real critical point (˚gve,˚zvf) is given in Appendix A. +By fixing the re-scaling gauge of zvf, each zvf can be parameterized with two real variables +xvf, yvf: +zvf = (1, xvf + iyvf)T . +(4.5) +gvei, i = (2, 3, 4, 5) are parameterized as +� 1 + +� +x1 +ve + iy1 +ve +� +/ +√ +2 +� +x2 +ve + iy2 +ve +� +/ +√ +2 +� +x3 +ve + iy3 +ve +� +/ +√ +2 +1+(x2 +ve+iy2 +ve)(x3 +ve+iy3 +ve)/2 +1+(x1ve+iy1ve)/ +√ +2 +� +, +x1 +ve, y1 +ve, x2 +ve, y2 +ve, x3 +ve, y3 +ve ∈ R. +(4.6) +Therefore, the 4-simplex action is a function in terms of all real variables x = (xvf, yvf, x1 +ve, y1 +ve, x2 +ve, y2 +ve, x3 +ve, y3 +ve) +for all f in {f1, ...f10} and e in {e2, ..e5}. The real critical point ˚zvf is in the form ˚zvf = (1,˚αvf)T , +where ˚αvf = ˚xvf + i˚yvf ∈ C. +It is convenient to set one of the critical points at the origin +˚x = {0, 0, ..., 0} by modifying (4.5) and (4.6) to +zvf = (1,˚αvf + xvf + iyvf)T , +gve = ˚gve +� 1 + +� +x1 +ve + iy1 +ve +� +/ +√ +2 +� +x2 +ve + iy2 +ve +� +/ +√ +2 +� +x3 +ve + iy3 +ve +� +/ +√ +2 +1+(x2 +ve+iy2 +ve)(x3 +ve+iy3 +ve)/2 +1+(x1ve+iy1ve)/ +√ +2 +� +. +(4.7) +With the parameterization in (4.7), the measures dgve and dΩzvf are +dgve = +1 +128π4 +dx1 +vedx2 +vedx3 +vedy1 +vedy2 +vedy3 +ve +���1 + x1ve+iy1ve +√ +2 +��� +2 +, +dΩzvf = +dxvf dyvf +⟨Zvef, Zvef⟩ ⟨Zve′f, Zve′f⟩. +(4.8) +As a result, the 4-simplex amplitude is in the form +Av = +� +d44x µ(x) eλS(r,x), +(4.9) +where r = (jf, ξef) are boundary data. The integral is 44 real-dimensional. In the following, we +– 11 – + +focus on a neighborhood K of ˚x. We have defined the local coordinates x ∈ R44 covering K. +4.2 +Deviating from the shape-matching +The amplitude Av has the real critical points with the non-degenerate Regge boundary data ˚r. +However, the real critical point disappears when the boundary data deviates away from ˚r. 
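As an aside for readers reproducing the boundary data: the areas a_f = γ˚j_f (listed in Appendix A, not copied here) can be recomputed directly from the coordinates (4.3). The sketch below does this with Gram determinants, assuming the metric convention η = diag(−1, 1, 1, 1) written out later in Eq. (6.3), and reading the last component of P5 as −3^{−1/4}·10^{−1/2} and the last component of P3 as −√5/3^{1/4}; with this reading the areas of the triangles (1, 2, 3) and (1, 2, 5) come out as 5 and 2, matching the constraint values used in (4.14) below.

```python
import numpy as np
from itertools import combinations

eta = np.diag([-1.0, 1.0, 1.0, 1.0])  # Minkowski metric, signature (-,+,+,+) as in Eq. (6.3)

# Vertex coordinates of the 4-simplex, Eq. (4.3)
# (last component of P3 read as -sqrt(5)/3**0.25, which makes the areas below
#  consistent with the tetrahedron data (4.11) and the constraints (4.14))
P = {
    1: np.array([0.0, 0.0, 0.0, 0.0]),
    2: np.array([0.0, 0.0, 0.0, -2*np.sqrt(5)/3**0.25]),
    3: np.array([0.0, 0.0, -3**0.25*np.sqrt(5), -np.sqrt(5)/3**0.25]),
    4: np.array([0.0, -2*np.sqrt(10)/3**0.75, -np.sqrt(5)/3**0.75, -np.sqrt(5)/3**0.25]),
    5: np.array([-1/(3**0.25*np.sqrt(10)), -np.sqrt(5/2)/3**0.75,
                 -np.sqrt(5)/3**0.75, -np.sqrt(5)/3**0.25]),
}

def mink(u, v):
    return u @ eta @ v

def area(a, b, c):
    """Area of the spacelike triangle (a, b, c) from the Gram matrix of two edge vectors."""
    u, v = P[b] - P[a], P[c] - P[a]
    gram = np.array([[mink(u, u), mink(u, v)],
                     [mink(u, v), mink(v, v)]])
    return 0.5 * np.sqrt(np.linalg.det(gram))

# The ten boundary triangles f of the 4-simplex (1,2,3,4,5); their areas are a_f = gamma * j_f
for tri in combinations(range(1, 6), 3):
    print(tri, round(area(*tri), 6))   # e.g. (1,2,3) -> 5.0 and (1,2,5) -> 2.0
```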
Considering +a neighborhood U of ˚r in the space of boundary data, such that any r ∈ U (different from ˚r) does +not correspond to any Regge geometry or vector geometry3. If we fix r ∈ U and scale the spins with +a large λ, there are two possible behaviors for the amplitude [8, 44] +• For r = ˚r, the amplitude has two critical points whose geometrical interpretations have +opposite orientations. S evaluated at critical points gives the Regge action of the 4-simplex +with opposite sign. Therefore, the asymptotic amplitude of the 4-simplex gives two oscillatory +phases +Av ≃ λ−12 � +N+eiλSRegge + N−e−iλSRegge � +. +(4.10) +• For r ̸= ˚r, it leads to no solutions to (3.1) and the exponentially suppressed amplitude. +To interpolate smoothly between the oscillatory phases and the exponential suppression in the +asymptotics (4.10), the discussion in section 3 suggests making r vary and introducing the complex +critical points. +The boundary data ˚r = {˚jf,˚ξef} of the Lorentzian Regge geometry satisfies the shape-matching +condition, i.e., five geometrical tetrahedra determined by ˚r on the boundary are glued with the +triangles matching in shapes. Consider the 4-simplex action S(r, x) in the neighborhood K × U of +(˚r,˚x). We define z ∈ C44 as the complexification of x, and S(r, z) extends holomorphically S(r, x) +to a complex neighborhood of ˚x. To avoid confusion, we note that the integration variables x are +complexified, while the boundary data r = (jf, ξef) is real. +Next, we let r = ˚r + δr vary, such that the shape-matching condition violates. We describe +below a parametrization of the tetrahedron shapes. A tetrahedron in R3 is determined by 4 points +{ ˜Pa, ˜Pb, ˜Pc, ˜Pd} up to a R3 ⋊ O(3) symmetry. We gauge fix the R3 ⋊ O(3) symmetry by choosing +˜Pa at the origin, ˜Pb along the z axis, and ˜Pc within the (y, z)-plane. The last point ˜Pd is not +constrained. Given the tetrahedron’s segment lengths, the coordinates of the points are fixed in +R3 by the above gauge fixing. For example, for the tetrahedron e2 = {1, 2, 3, 5}, ˚r implies that the +coordinates of the points in R3 are given by +˜P1 = (0, 0, 0), +˜P2 = (0, 0, −3.40), +˜P3 = (0, −2.94, −1.70), +˜P5 = (−0.651, −0.981, −1.70). +(4.11) +All other four tetrahedra can be described similarly, and the coordinates of the points in R3 are +determined by ˚r. The 3d face-normals ⃗n implied by the coordinates match with the data in Table 3 +up to a simultaneous SO(3) rotation. The spinors ξ associating with each face are given by +ξ = +1 +√ +2 +�√ +1 + w, x + iy +√1 + w +�T +, +if ⃗n = (x, y, w)T. +(4.12) +When we deform the boundary data, we keep the areas jf = ˚jf unchanged, while ξef are +deformed, such that the boundary data r is deformed to violate the shape-matching condition. We +3In the Lorentzian EPRL spinfoam amplitude, the critical points corresponding to the non-degenerate Regge +geometry are isolated critical points. +– 12 – + +move the vertices ˜Pa ∈ R3 to deform the tetrahedron shapes. For example, the vertices in (4.11) are +moved to new positions +˜P1 = (0, 0, 0), +˜P2 = (0, 0, −3.40 + δw(2) +2 ), +˜P3 = (0, −2.94 + δy(2) +3 , −1.70 + δw(2) +3 ), +˜P5 = (−0.651 + δx(2) +5 , −0.981 + δy(2) +5 , −1.70 + δw(2) +5 ). +(4.13) +In the notations δx(a) +i +, δy(a) +i +,δw(a) +i +, a = 1, · · · , 5 labels the tetrahedron, and i = 1, · · · , 5 labels the +variables associated to the vertex ˜Pi. There are 30 variables δx(a) +i +, δy(a) +i +,δw(a) +i +in total. We keep the +face areas unchanged. 
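The normal-to-spinor map (4.12) and the relation n_ef = ⟨ξ_ef, σ⃗ ξ_ef⟩ quoted below (4.4) are straightforward to verify numerically; the following sketch (ours, with an arbitrary illustrative unit normal) implements the map and checks the inversion. This is how the deformed normals constructed below are converted into the spinors ξ_ef.

```python
import numpy as np

# Pauli matrices
sigma = [np.array([[0, 1], [1, 0]], dtype=complex),
         np.array([[0, -1j], [1j, 0]], dtype=complex),
         np.array([[1, 0], [0, -1]], dtype=complex)]

def spinor_from_normal(n):
    """Eq. (4.12): unit 3-normal n = (x, y, w) -> normalized spinor xi."""
    x, y, w = n
    return np.array([np.sqrt(1 + w), (x + 1j*y)/np.sqrt(1 + w)]) / np.sqrt(2)

def normal_from_spinor(xi):
    """n_k = <xi, sigma_k xi>, the relation between xi_ef and n_ef."""
    return np.array([np.real(np.conj(xi) @ s @ xi) for s in sigma])

n = np.array([0.3, -0.4, np.sqrt(1 - 0.3**2 - 0.4**2)])   # illustrative unit normal
xi = spinor_from_normal(n)
print(np.allclose(normal_from_spinor(xi), n))   # True: <xi, sigma xi> reproduces n
print(np.vdot(xi, xi).real)                     # 1.0: xi is normalized
```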
Then in each tetrahedron, Heron’s formula gives 4 constraint equations, each +corresponding to a face area. For example, in the tetrahedron e2 = {1, 2, 3, 5}, the equations are +� +� +� +� +� +� +� +� +� +A123(δw(2) +2 , δy(2) +3 , δw(2) +3 ) = 5 +A125(δw(2) +2 , δx(2) +5 , δy(2) +5 , δw(2) +5 ) = 2 +A135(δy(2) +3 , δw(2) +3 , δx(2) +5 , δy(2) +5 , δw(2) +5 ) = 2 +A235(δw(2) +2 , δy(2) +3 , δw(2) +3 , δx(2) +5 , δy(2) +5 , δw(2) +5 ) = 2. +(4.14) +At least in a neighborhood of the deformation, δw(2) +2 , δy(2) +3 , δw(2) +3 , δx(2) +5 +can be solved in terms of +δy(2) +5 , δw(2) +5 +from (4.14). The shape of the tetrahedron is parameterized by 2 variables δy(2) +5 , δw(2) +5 . +This way of parametrization is convenient in our computation. +However, it is different from +the known strategy, such as the Kapovich-Millson phase space [47] or using dihedral angles +[48]. For each tetrahedron, we adopt the same strategy. We have in total ten variables B ≡ +(δy(1) +4 , δw(1) +4 , δy(2) +5 , δw(2) +5 , δy(3) +5 , δw(3) +5 , δy(4) +5 , δw(4) +5 , δw(5) +5 , δw(5) +5 ) to parameterize the deformation of +five tetrahedra. The spinors ξef of each face can be expressed in terms of B according to (4.12). +At this point, the boundary data is r(B) = (jf, ξef(B)). We insert r(B) into the action S(r(B), x) +in (4.2), whose analytical extension is S(r(B), z). +Then, the complex critical equations are +F(B, z) = ∂zS(r(B), z) = 0, from which we solve for the complex critical point z(B). +The asymptotics of the 4-simplex amplitude with the boundary data violating the shape-matching +condition is given by (3.8). Here, the complex critical point z(B) inserting into the analytic continued +action gives S(r(B), z(B)). In contrast to the Regge action obtained from spinfoam asymptotics +in [8], S(r(B), z(B)) is an action of the twisted geometry. Indeed, S(r(B), z(B)) depends on the +degrees of freedom of semiclassical tetrahedra, which are not constrained by the shape-matching +condition. These degrees of freedom are beyond the Regge geometry and belong to the twisted +geometry of the boundary. +To solve the complex critical point, we can linearize (4.14) and obtain the linear solution +(δw(2) +2 , δy(2) +3 , δw(2) +3 , δx(2) +5 ) in terms of δy(2) +5 , δw(2) +5 . We can also linearize the complex critical equation +at B = (0, · · · , 0), and then solve for the complex critical point z = z(lin)(B). The solution z(lin)(B) +is a linear function of the perturbations B. The coefficients in the linear function can be computed +numerically. Inserting this linear solution into the action, we obtain S(r(B), z(lin)(B)) as a function +of B and expand it to the second order: +S(r(B), z(lin)(B)) = QijBiBj + LjBj + S0 +(4.15) +where the coefficients Qij, Lj can be computed numerically. S0 is the spinfoam action evaluated +at the real critical point with B = (0, · · · , 0). In Figure 3, we let B = (0, 0, 0, δw(2) +5 , 0, 0, 0, 0, 0, 0), +the red curves in (a) and (b) are the real part and imaginary part of S(r(B), z(lin)(B)) with δw(2) +5 +varying from -1 to 1. +The linear solution may have a large error when components in B are large. We apply the +Newton-Raphson method to numerically search for the solution, which is more accurate than the +linear solution. To compare with the linear solution in Figure 3, we still only focus on the deformation +– 13 – + +of e2 = {1, 2, 3, 5} and set δy(2) +5 += 0. We outline the procedure in the following. 
+For any given δw(2) +5 , we can numerically solve equations (4.14) for (δw(a) +2 , δy(a) +3 , δw(a) +3 , δx(a) +5 ). +There are multiple solutions. We select the solution that is within a neighborhood at (0, 0, 0, 0), +by requiring |δw2 +2 + δy2 +3 + δw2 +3 + δx2 +5| ≤ 4|δw2 +5|. The coordinates in (4.13) given by the solution +result in the 3d face normal vectors ⃗n and spinors ξ, which are the boundary data r violating the +shape-matching condition. +We apply the Newton-Raphson method to search for the complex critical point satisfying +∂zS = 0. An outline of the procedure in the Newton-Raphson method is given in Appendix B. In +Figure 3, the blue curves in (a) and (b) are the real part and imaginary part of the analytically +continued action at the complex critical points. This numerical result (blue curves) and the result +from the linear solution (red curves) are close when the deformation is small. However, the linear +solution is less accurate when the deformation is large. +Figure 3. In both panels, the blue curves are the numerical results with the Newton-Raphson method, +and the red curves are the results from the linear solution. Panel (a) is the real part of the analytically +continued action S at the complex critical points varying with δw(2) +5 . Panel (b) is the imaginary part of S +at the complex critical points varying with δw(2) +5 . The range of δw(2) +5 +is [-1,1]. +Figure 3 demonstrates the smooth interpolation between the oscillatory and exponential suppres- +sion behaviors mentioned at the beginning of this subsection. In addition to scaling large λ, we need +to consider the smooth deformation B. For any given λ, there exists sufficiently small deformation +B beyond the shape-matching, such that Re(S) is small, and thus the amplitude is not suppressed. +5 +Revisit the ∆3 amplitude +In this section, we revisit briefly the existing result on the spinfoam amplitude on the ∆3 complex, +for the completeness and preparing the discussion of the double-∆3 complex in the next section. +The ∆3 complex contains a single internal face F = 1 but has no internal segment M = 0. There is +an internal jh that is an integrated variable in the amplitude A(∆3) in (2.16). +The ∆3 complex and its dual cable diagram are represented in Figure 4. All tetrahedra and +triangles are spacelike. The Regge geometry on ∆3 is completely fixed by the Regge boundary data +{jb, ξeb} that is determined by the boundary segment lengths. In this section, we only focus on the +Regge boundary data, in contrast to the discussion of 4-simplex amplitude in the previous section. +The generalization to non-Regge boundary data should be straightforward. In terms of the notations +in Section 3, we have r = {jb, ξeb} as the boundary data. ˚r = {˚jb,˚ξeb} fixes the flat geometry g(˚r) +with deficit angle δh = 0. ˚x = {˚jh,˚gve,˚zvf} is the real critical point associated to ˚r. The data ˚r, +g(˚r), and ˚x are computed numerically in [12]. +– 14 – + +Figure 4. Panel (a) illustrates the simplicial complex ∆3 made by three 4-simplices {v1, v2, v3} and 12 +tetrahedra ei sharing nineteen faces fi. There are eighteen boundary faces and one internal face. Panel +(b) is the dual cable diagram of the ∆3 spinfoam amplitude: The boxes correspond to tetrahedra carrying +gve ∈ SL(2, C). The strands stand for triangles carrying spins jf. The strand with the same color belonging +to a different dual vertex corresponds to the triangle shared by the different 4-simplices. 
The circles at the endpoints of the strands carry the boundary states |jb, ξeb⟩. The arrows represent orientations. This figure is adapted from [49].
According to the general spinfoam amplitude (2.16) and the spinfoam action (2.17), the ∆3 amplitude A(∆3) can be written as
\begin{equation}
A(\Delta_3)=\sum_{k_h\in\mathbb{Z}} 2\lambda \int \mathrm{d}j_h\, d_{\lambda j_h} \int[\mathrm{d}g\,\mathrm{d}z]\, e^{\lambda S^{(k)}},\qquad
S^{(k)}=S+4\pi i\sum_h j_h k_h .
\tag{5.1}
\end{equation}
For each kh in (5.1), the real critical point {˚jh, ˚gve, ˚zvf} exists only when the boundary data satisfies the accidental flatness constraint (3.3).
Given the boundary data ˚r corresponding to δh = 0, we consider its neighborhood U in the space of non-degenerate Regge boundary data, such that any boundary data r ∈ U satisfies |γδh| < 4π. For large λ, the sectors with kh ≠ 0 do not give the dominant contribution to A(∆3) as long as r ∈ U. If we arbitrarily fix the boundary data r ∈ U and scale λ large, the amplitude has two asymptotic behaviors, analogous to the discussion at the beginning of Section 4.2:
• For boundary data corresponding to a flat Regge geometry, there is a real critical point, and the amplitude gives an oscillatory phase.
• For boundary data corresponding to a curved Regge geometry, there is no real critical point, and the amplitude is exponentially suppressed.
However, this way of presenting the asymptotic behavior leads to the confusion about the flatness problem. From the discussion in Section 3, it is clear that there is a smooth interpolation between the oscillatory-phase and exponential-suppression behaviors, since the boundary data varies smoothly. The interpolation is obtained by applying the method of the complex critical point. The formal discussion of the complex critical point and the asymptotic behavior of this model has been given in [12]. Figure 5(a) plots e^{λRe(S)} in the asymptotic formula (3.8) versus δh determined by the boundary data and demonstrates the smooth interpolation between the above two asymptotic behaviors. Letting the boundary data vary at the same time as scaling λ, for any λ we find boundary data for curved geometries with small nonzero δh such that the amplitude A(∆3) is not suppressed, as shown in Figure 5(b). The range of δh for non-suppressed A(∆3) is nonvanishing as long as λ is finite. The range of δh is enlarged when γ is small, as shown in Figure 5(c). The δh that leads to non-suppressed e^{λRe[S(Z(r))]} satisfies the bound
\begin{equation}
|\gamma \delta_h| \lesssim \lambda^{-1/2}.
\tag{5.2}
\end{equation}
The above result provides evidence for the emergence of curved geometries from the spinfoam amplitude. The bound (5.2) is consistent with the earlier proposal [11] and with the result in the effective spinfoam model [13, 28, 50]. So far, the bound (5.2) has only been confirmed in the regime of small or finite γ; as we are going to see in Section 7, in the large-γ regime there are geometries violating the bound (5.2) that still give a non-suppressed contribution to the spinfoam amplitude.
Figure 5. Panel (a) plots e^{λRe(S)} versus the deficit angle δh at λ = 10^{11} and γ = 0.1 in A(∆3). Panels (b) and (c) are contour plots of e^{λRe(S)} as functions of (λ, δh) at γ = 0.1 and of (γ, δh) at λ = 5 × 10^{10} in A(∆3). They demonstrate the (non-blue) regime of curved geometries where the spinfoam amplitude is not suppressed. These figures first appeared in [12].
6 Double-∆3 amplitude and effective action
6.1 Some setups
The ∆3 complex does not have any internal segment, and the boundary data determines the Regge geometry completely. Since M = 0, A(∆3) does not give the lI-integral in (3.10), so the effective dynamics of Regge geometry is trivial. In this section, we study the spinfoam amplitude on the "double-∆3" complex (see Figure 6(a)), which is denoted by ∆₃². The double-∆3 complex contains a single internal segment, so M = 1, and A(∆₃²) gives (3.10) as a 1-dimensional integral. Therefore, the double-∆3 complex admits non-trivial effective dynamics of the Regge geometry. Note that the same complex has also been considered in the context of the effective spinfoam model [50].
The double-∆3 complex glues a pair of ∆3 complexes around the internal segment (1, 2). The complex has seven points P1, . . . , P7. The 4-simplices are given by
{v1, · · · , v6} = {(1, 2, 3, 4, 6), (1, 2, 3, 5, 6), (1, 2, 4, 5, 6), (1, 2, 3, 4, 7), (1, 2, 3, 5, 7), (1, 2, 4, 5, 7)}.
The tetrahedra are labelled by {e1, · · · , e21}⁴. There are twelve boundary tetrahedra and nine internal tetrahedra among them. The internal spins jh = {j123, j124, j125, j126, j127} are carried by the 5 internal triangles, whose dual faces are bounded by the red loops shown in the dual diagram in Figure 6(b). Since there is only one internal segment (1, 2) and all other segments are on the boundary, the boundary data and the length l12 of the internal segment determine the Regge geometry g(r) on ∆₃².
Figure 6. A complex made of six 4-simplices sharing the bulk edge (1, 2) with length l12 (the red line in panel (a)). In panel (a), the boundary edges are colored black, blue, violet, and cyan; the bulk edge is colored red. Panel (b) is the dual complex of the triangulation. The internal faces carrying j123, j124, j125, j126, j127 are bounded by red loops, and the other faces are boundary faces.
Following the procedure described in (3.6) and (3.5), we single out the internal spin j123 and express the spinfoam amplitude as
\begin{align}
A(\Delta_3^2) &= \int \mathrm{d}j_{123}\, Z(j_{123}; j_b, \xi_{eb}),\nonumber\\
Z(j_{123}; j_b, \xi_{eb}) &= \sum_{\{k_h\}} \int \prod_{\bar{h}=1}^{4} \mathrm{d}j_{\bar{h}} \prod_{h=1}^{5} 2\lambda\, \tau_{[-\epsilon,\,\lambda j_{\max}+\epsilon]}(\lambda j_h) \int \mathrm{d}\mu(g,z)\, e^{\lambda S^{(k)}},
\tag{6.1}
\end{align}
where j¯h = {j124, j125, j126, j127}. The external data of Z is rl = {j123(l12); jb, ξeb}, including both the boundary data and j123(l12). Identifying γjf with the area of f (in Planck units), Heron's formula
\begin{equation}
\gamma j_{123}(l_{12}) = \frac{1}{4}\sqrt{4 l_{12}^2 l_{13}^2 - \big(l_{12}^2 + l_{13}^2 - l_{23}^2\big)^2}
\tag{6.2}
\end{equation}
relates j123 to the internal segment length l12 and the boundary segment lengths l13, l23. We consider the Regge boundary data that determines all the boundary segment lengths. We can always make a local change of the real variable j123 → l12 within a neighborhood K of a given Regge geometry, where the correspondence j123 ↔ l12 is 1-to-1.
⁴The tetrahedra are {e1, · · · , e21} = {{1, 2, 3, 4}, {1, 2, 3, 6}, {1, 2, 4, 6}, {1, 3, 4, 6}, {2, 3, 4, 6}, {1, 2, 3, 5}, {1, 2, 5, 6}, {1, 3, 5, 6}, {2, 3, 5, 6}, {1, 2, 4, 5}, {1, 4, 5, 6}, {2, 4, 5, 6}, {1, 2, 3, 7}, {1, 2, 4, 7}, {1, 3, 4, 7}, {2, 3, 4, 7}, {1, 2, 5, 7}, {1, 3, 5, 7}, {2, 3, 5, 7}, {1, 4, 5, 7}, {2, 4, 5, 7}}.
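Since the local change of variables j123 ↔ l12 and the Jacobian ∂j123/∂l12 enter the measure in (3.6) (and in (6.9) below), here is a small numerical sketch of (6.2) and of its inversion (ours; the boundary lengths l13 = 3.40 and l23 = 2.14 and the flat value l12 = 1.45 are taken from Table 1 below, while the value of γ is only illustrative).

```python
import numpy as np
from scipy.optimize import brentq

gamma = 0.1              # Barbero-Immirzi parameter (illustrative value)
l13, l23 = 3.40, 2.14    # boundary segment lengths of the triangle (1,2,3), Table 1

def j123(l12):
    """Heron's formula (6.2): gamma * j123 is the area of the triangle (1,2,3)."""
    area = 0.25 * np.sqrt(4*l12**2*l13**2 - (l12**2 + l13**2 - l23**2)**2)
    return area / gamma

def dj123_dl12(l12, eps=1e-7):
    """Jacobian d j123 / d l12 entering the measures in (3.6) and (6.9)."""
    return (j123(l12 + eps) - j123(l12 - eps)) / (2*eps)

def l12_from_j123(j_target, bracket=(1.3, 1.6)):
    """Local inversion j123 -> l12, valid in a neighborhood where the map is 1-to-1."""
    return brentq(lambda l: j123(l) - j_target, *bracket)

L0 = 1.45                # flat-geometry value of l12 (Table 1)
j0 = j123(L0)
print(j0, dj123_dl12(L0), l12_from_j123(j0))   # the inversion returns L0 up to numerical error
```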
+In the following discussion, we only focus on the case with kh = 0. The Regge geometries under +consideration are of small deficit angles. The following describes the procedure to compute the +complex critical points Z(rl) of Z. +We embed the double-∆3 complex in (R4, ηIJ) and determines a flat Regge geometry with all +tetrahedra spacelike. We assign the following coordinates to the points, +P1 = (0, 0, 0, 0), +P2 = (−0.0680, −0.220, −0.532, −1.33) , +P3 = (0, 0, 0, −3.40) , +P4 = (−0.240, −0.694, −0.981, −1.70) , +P5 = (0, 0, −2.94, −1.70) , +P6 = (0, −2.77, −0.981, −1.70) , +P7 = (−2.47, −3.89, −1.36, −1.91) . +From the coordinates, we can compute the length of the segments of the triangulation by using +lij = +� +ηIJ(Pi − Pj)I(Pi − Pj)J. +(6.3) +with ηIJ = Diag({−1, 1, 1, 1}) the Minkowski metric. The segment lengths are shown in Table 1. +The triangles within a 4-simplex are classified into two categories [8]: The triangle corresponds to +Table 1. Each cell of the table is the segment length for vertice Pi and Pj. +i +lij +j +1 +2 +3 +4 +5 +6 +7 +1 +1.45 +3.40 +2.07 +3.40 +3.40 +3.81 +2 +1.45 +2.14 +0.729 +2.45 +2.62 +2.96 +3 +3.40 +2.14 +2.07 +3.40 +3.40 +3.62 +4 +2.07 +0.729 +2.07 +2.07 +2.07 +2.34 +5 +3.40 +2.45 +3.40 +2.07 +3.40 +3.41 +6 +3.40 +2.62 +3.40 +2.07 +3.40 +7 +3.81 +2.96 +3.62 +2.34 +3.41 +the thin wedge if the inner product between the timelike normals of the two adjacent tetrahedra is +positive, otherwise the triangle corresponds to the thick wedge. The dihedral angle θv,ei,ej are given +by: +thin wedge: +Nvei · Nvej = cosh θv,ei,ej, +thick wedge: +Nvei · Nvej = − cosh θv,ei,ej, +(6.4) +where the inner product is the Minkowski inner product defined by η. Then we check the deficit +angles δhi associated to the shared triangles hi +0 = δh1 = θv1,e1,e2 + θv2,e2,e6 + θv4,e1,e13 + θv5,e6,e13 ≈ 0.514 + 0.464 − 0.575 − 0.404, +0 = δh2 = θv1,e1,e3 + θv3,e3,e10 + θv4,e1,e15 + θv6,e10,e15 ≈ 1.08 − 1.02 − 1.30 + 1.24, +0 = δh3 = θv2,e6,e7 + θv3,e7,e10 + θv5,e6,e17 + θv6,e10,e17 ≈ −0.360 − 0.481 + 0.414 + 0.426, +0 = δh4 = θv1,e2,e3 + θv2,e2,e7 + θv3,e7,e10 ≈ −0.723 − 0.208 + 0.931, +0 = δh5 = θv4,e1,e15 + θv5,e13,e17 + θv6,e15,e17 ≈ −0.903 + 1.20 − 0.301, +(6.5) +which implies the Regge geometry is flat. The data of the flat geometry determines the external data +˚rl for the partial amplitude Z, which has the real critical points (˚j¯h,˚gve, ˚zvf) corresponding to this +flat Regge geometry and endowing the consistent 4-orientations to all 4-simplices. The boundary +– 18 – + +data of the flat geometry and the real critical point can be found in Appendix C.1, and Mathematica +code can be found in [51] and [52]. In this case, given the boundary data, the flat Regge geometry is +the solution of the classical Regge equation of motion, and it is also the solution (˚j¯h,˚gve, ˚zvf) to +the critical equations from the spinfoam amplitude. +We are going to compare the classical Regge dynamics and the spinfoam effective dynamics +for curved geometries. This comparison is based on the numerical computations. In concrete, +we deform the boundary segment length l35 → l35 + 10−3 but keep the other boundary segment +lengths unchanged. The boundary data does not admit any flat geometry on ∆2 +3 (see Figure 7(b))5. +With this deformation, a classical Regge solution (i.e. 
the solution to the classical Regge equation +δSRegge = 0) gives the deficit angles +δh1 = 0.0118, +δh2 = 0.0661, +δh3 = −0.0215, +δh4 = −0.0236, +δh5 = −0.0252, +(6.6) +which implies that the classical Regge dynamics gives curved geometry. We fix the boundary data +and vary the internal segment length l12 = L0 + δL where L0 = 1.45 is the length l12 in the flat +geometry. The change of l12 is denoted by δL with δL ∈ [−0.0129, 0.00251] 6. The classical Regge +action SRegge as a function of δL is plotted in Figure 7(a). The above solution leading to (6.6) is +close to the origin δL = 0 and is denoted by δLRegge +c +. There exists another Regge solution in δL < 0 +and far from δL = 0 as shown in Figure 7(a). We denote this solution by δ�LRegge +c +. +Likely, the solution δ�LRegge +c +is a discretization artifact because when smoothly deforming the +boundary data l35 back to the one for the flat geometry, δLRegge +c +reduces back to the flat solution. +In contrast, δ�LRegge +c +still reduces to a curved Regge geometry. Some boundary data also exist such +that the second solution δ�LRegge +c +disappears. Nevertheless, we will take into account both solutions +δLRegge +c +and δ�LRegge +c +in discussing the effective dynamics in Section 7. +The boundary data (jb, ξef) and the corresponding pseudo-critical points (j0 +h, g0 +ve, z0 +vf) for +the curved geometry with the boundary segment length l35 → l35 + 10−3 and the internal edge +l12 = L0 + δLRegge +c +are listed in Appendix C.2. +Notice that the geometrical areas in the boundary data relate to jb by ab = γjb, and the area ab +relates to the lengths lij by Heron’s formula. The following discussion involves fixing the geometrical +area ab and performing computations at different Barbero-Immirzi parameter γ, so this leads to +different jb at different γ. Fixing the geometrical area instead of fixing jb is useful when we compare +with the Regge action SRegge, since SRegge only depends on the geometrical boundary data. +6.2 +Numerical computing the effective action +Given the boundary condition (jb, ξeb) corresponds to the above Regge boundary data with the +deformed l35, and given any l12 and j123(l12) taking value inside a neighborhood of the value for the +flat geometry, we find the pseudo-critical point (j0 +¯h, g0 +ve, z0 +vf) close to the real critical point inside +the real integration domain. The pseudo-critical point only satisfies Re(S) = ∂gveS = ∂zvf S = 0 +but does not necessarily satisfy ∂j¯hS = 0. The pseudo-critical point (j0 +¯h, g0 +ve, z0 +vf) is the critical +point of the spinfoam amplitude with fixed jh, jb [9], and endows the Regge geometry g(r) and +consistent 4-simplex orientations to ∆2 +3 complex7. It reduces to the real critical point (˚j¯h,˚gve,˚zvf) +5If the boundary data admitted a flat Regge geometry on the complex, the flat geometry would be a solution to +the Regge equation. However, the solution of the Regge equation is a curved geometry with the given boundary data, +contradicting the assumption of admitting the flat geometry. +6The range used here is restricted by the existence of curved Regge geometry with all tetrahedra spacelike. +7Since the correspondence between j123 and l12 is not 1-to-1 globally, it might be possible to have multiple pseudo- +critical points corresponding to different Regge geometries with the same value of j123. However, in our numerical +analysis, the other l12 from the same j123 does not satisfy the triangle inequality. 
Therefore all pseudo-critical points +correspond to the same Regge geometry but with different 4-simplex orientations, although we only focus on a fixed +orientation. +– 19 – + +Figure 7. Panel (a) is the Regge action varying with δL when we deform the boundary segment length +l35 → l35 + 10−3 from the boundary data of the flat geometry. In this case, the Regge solutions are given +by δLRegge +c +≃ 0.000439 and δ�LRegge +c +≃ −0.00834. Panel (b) is +� +(�5 +i=1 δ2 +hi)/5 versus δL with the deformed +boundary data. All geometries in the range of δL are not flat. The minimum of +� +(�5 +i=1 δ2 +hi)/5 is 0.013. +when rl = ˚rl corresponds to the flat geometry on ∆2 +3. As the deformation of segment length l35 is +small, this curved geometry is close to the flat geometry, so (j0 +¯h, g0 +ve, z0 +vf) is close to (˚j¯h,˚gve,˚zvf) in +the integration domain. The data for the pseudo-critical point is listed in Appendix C.2. +In this computation, we still adopt the similar parametrizations of variables as in (4.5), (4.6), +and (4.7), but with the pseudo-critical points as the origin. The parametrizations of the group +element gv1e2, gv2e7, gv3e3, gv4e13, gv5e17, gv6e15, gv1e1, gv2e6, and gv3e10 are upper-triangular matrices +due to the SU(2) gauge fixing at 9 internal tetrahedra +gve = g0 +ve +� +1 + x1 +ve +√ +2 +x2 +ve+iy2 +ve +√ +2 +0 +∗ +� +, +(6.7) +where the entry ∗ is determined by det(gve) = 1. The internal spin j¯h is parametrized as +j¯h = j0 +¯h + j¯h, +j¯h ∈ R. +(6.8) +As a result, for kh = 0, the spinfoam amplitude A(∆2 +3) and Z(j123) in (6.1) can be written in the +form of +A(∆2 +3) = +� +dl12 +���� +∂j123 +∂l12 +���� Z(j123(l12); jb, ξeb), +Z(j123(l12); jb, ξeb) ∼ +� +d241x µ(x)eλS(rl,x), +rl = (j123(l12), jb, ξeb) +(6.9) +where x ≡ (x1 +ve, y1 +ve, x2 +ve, y2 +ve, x3 +ve, y3 +ve, xvf, yvf, j¯h). +The parametrizations of (l12, x) define the +coordinate chart covering the neighborhood K enclosing ˜x0 = (j123, x0) = (j0 +h, g0 +ve, z0 +vf), and +˚˜x = (˚j123,˚x) = (˚jh,˚gve,˚zvf). This neighbourhood is large enough since the parametrizations are +valid generically. The pseudo-critical point is x0 = (0, 0, ..., 0), which contains 241 zero components. +Here we use “∼” instead of “=” because (1) we only consider kh = 0 but ignore other kh terms8, (2) +we only focus on the contribution from the neighborhood K enclosing a single pseudo-critical point9. +8The integrals in the neighborhood K with kh ̸= 0 give exponentially suppressed contributions. +9there may exist other pseudo-critical points outside K in Z, e.g. the ones corresponding to different orientations +– 20 – + +SL +SLIn our discussion, we only consider the effective dynamics within a sector of Regge geometries with +the fixed 4d orientation. +We compute the complex critical point of Z for any given external data rl: Here, both S(r, x) +and µ(x) are analytic in the neighborhood K of x0. S(r, x) can be analytically continued to a +holomorphic function S(rl, z), and z ∈ C241 is in a complex neighborhood of x0. The analytic +continuation is obtained by simply extending x ∈ R241 to z ∈ C241. The formal discussion of the +analytic continuation of the spinfoam action is given in [14]. We fix the boundary data to be the one +resulting in (6.6) and vary the length l12 = L0 + δL, where L0 = 1.45 (the value of l12 in Table 1) +and the change of l12, δL ∈ [−0.0129, 0.00251]. 
For any given δL, combining the boundary data, we +repeat the steps above (from the beginning of this subsection) to reconstruct the Regge geometry and +the corresponding pseudo-critical point. Taking the pseudo-critical point as the starting point, we +apply the Newton-Raphson method by repeating the steps in (B.2) - (B.8) to numerically compute +the complex critical point Z(rl) for a sequence of δL. By evaluating S at the complex critical point +and apply the asymptotic formula (3.8), we obtain the following asymptotic behavior of Z and +A(∆2 +3) for the dominant contribution from the integral on K +Z (j123(l12); jb, ξeb) ∼ +� 1 +λ +� 241 +2 +Nl eλS(rl,Z(rl)) [1 + O(1/λ)] , +A +� +∆2 +3 +� +∼ +� 1 +λ +� 241 +2 � +dl12 +���� +∂j123 +∂l12 +���� Nl eλS(rl,Z(rl)) [1 + O(1/λ)] , +(6.10) +where Nl = µ(Z(rl)) det +� +−∂2 +z,zS(rl, Z(rl))/2π +�−1/2. Effectively, A +� +∆2 +3 +� +gives a path integral of +Regge geometry on ∆2 +3. S (rl, Z (rl)) is the effective action for the Regge geometry in the large-λ +regime of the spinfoam amplitude. The stationary phase approximation of the l12-integral in (6.10) +relates to the variation of S (rl, Z (rl)) with respect to l12. The effective equation of motion +∂l12S (rl, Z (rl)) = 0 +(6.11) +determines the effective dynamics of Regge geometry. +6.3 +Comparing to Regge action +It is interesting to compare the effective action S (rl, Z (rl)) to the classical Regge action SRegge +since both actions define the dynamics of Regge geometry. The definition of Regge action SRegge(l12) +is reviewed in Appendix D. In order to compare, we compute and plot the real and imaginary parts +SR and SI of S (rl, Z (rl)) respectively, +S (rl, Z (rl)) = SR(γ, δL) + iSI(γ, δL), +(6.12) +We view both SR and SI as functions of two variables γ and δL, and we compute the numerical +values of SR and SI with samples of γ ∈ [10−9, 106] and δL ∈ [−0.0129, 0.00251]. +It is known that the spinfoam action contains an overall phase, which needs to be subtracted +to compare to the Regge action. We denote the overall phase by φ(γ). This overall phase can be +computed numerically by inserting the pseudo-critical point (j0 +¯h, g0 +ve, z0 +vf) in the spinfoam action S +and subtracting the Regge action at the corresponding geometry. Generally, we have +φ(γ) = α/γ +(6.13) +of 4-simplices. But our discuss only focuses on the critical points inside K. +– 21 – + +Figure 8. The red curves plots the Regge action as a function of δL. In comparison to the Regge action, +the blue curves plots S′ +I of the analytic continued spinfoam action at complex critical points. The green +curve plots the real part SR of the analytic continued spinfoam action at complex critical points. +where the coefficient α depends on the boundary data. In terms of the spinfoam variables, the +overall phase comes from the γ-independent terms in S and is linear to the boundary spins φ ∼ jb, +but here we fix the boundary area and let γ vary, then φ ∼ ab/γ. The numerical value of α is +α = 0.003993 resulting from our setup of the boundary data. In general, the overall phase in the +spinfoam action can be cancelled by the phase choice of boundary ξeb. To remove the overall phase +from SI, we define S′ +I by +SI(γ, δL) = −S′ +I(γ, δL) + φ(γ). +(6.14) +S′ +I as a function of δL is compared to the classical Regge action for different values of γ in Figure 8. 
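The subtraction (6.13)-(6.14) is simple bookkeeping once S_I has been tabulated; a minimal sketch (ours, with α = 0.003993 as quoted above for this particular boundary data, and placeholder values for S_I):

```python
import numpy as np

alpha = 0.003993   # coefficient of the overall phase for this boundary data (Sec. 6.3)

def phi(gamma):
    """Overall phase (6.13): phi(gamma) = alpha / gamma."""
    return alpha / gamma

def S_I_prime(S_I, gamma):
    """Eq. (6.14): S_I = -S_I' + phi(gamma), hence S_I' = phi(gamma) - S_I."""
    return phi(gamma) - S_I

gamma = 0.1
S_I_tabulated = np.array([0.0405, 0.0399, 0.0402])   # placeholder numbers, not from the paper
print(S_I_prime(S_I_tabulated, gamma))               # values to be compared with S_Regge(deltaL)
```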
+The minus sign in front of S′ +I relates to the 4-simplex orientation in the real and pseudo-critical +– 22 – + +Regge Action +S' at complex critical points +--- Sr at complex critical pointsFigure 9. Panels (a) and (b) are log-log plots of the distances (7.5) between the spinfoam and Regge +solutions in a neighbourhood of δL = 0 as a function of γ. The boundary data has the boundary segment +length l35 deformed from the flat geometry by l35 → l35 + 10−3 for (a) and l35 → l35 + 10−10 for (b). +Figure 10. Panels (a) show the real part of the spinfoam solution δLSpinfoam +c +v.s. log-scaled γ value with +the boundary data deformed from the flat geometry by l35 → l35 + 10−3. Panels (b) is the log-log plot of +the absolute value of the imaginary parts of the spinfoam solution δLSpinfoam +c +as a function of γ. +points. As indicated by Figure 8, S′ +I well-approximates the Regge action for small γ with negligible +corrections. When increasing γ, S′ +I gives nontrivial corrections to the Regge action. +For any given γ, the real part SR is always negative, and |SR| is larger for larger |δL|, so eλS is +smaller for larger |δL|. However, if we fix δL and vary γ, |SR| is smaller so eλS is less suppressed for +any λ, when γ is smaller. In other words, the smaller γ opens a larger range of δL, in which |SR| is +small and eλS is not suppressed for a given λ. In this range of δL, the numerical result indicates +that S (rl, Z (rl)) well-approximates the Regge action. The similar situation has appeared in the +∆3 amplitude, where the amplitude with smaller γ admits a wider range of curved geometries (see +Figure 5(c)). +7 +Solutions of effective dynamics on double-∆3 +7.1 +Spinfoam complex critical point and the Regge solution δLRegge +c +The above discussion compares the effective action S(rl, Z(rl)) to the classical Regge action. It is +also interesting to compare the solution of the effective equation ∂l12S(rl, Z(rl)) = 0 to the solution +of the Regge equation. By the above computation, the real and imaginary parts of S(rl, Z(rl)) +are obtained as the numerical function. Numerically solving the effective equation involves finding +– 23 – + +7.37 +× 10-11 +-5Figure 11. The log-log plot of the average of the absolute value of the imaginary part of the complex +critical point v.s. γ. +Figure 12. Panels (a) are the log-log plot of the negative real parts of ˜S(r′, δL, z) at the complex critical +points z = ˜Z(r′, δL) as a function of γ with the boundary data deformed from the flat geometry by +l35 → l35 + 10−3. +Panels (b) show the imaginary parts of ˜S(r′, δL, z) at the complex critical points +z = ˜Z(r′, δL) v.s. log-scaled γ. We subtract the overall phase φ(γ) from Im[ ˜S(r′, δLSpinfoam +c +, ˜Z)] and add +a minus sign in plotting (b). In Panel (b), the overall phase φ(γ) ≃ 0.003993γ−1, and the maximum and +minimum of the plot range are Maxa ≃ 0.121606 and Mina ≃ 0.121596. +the possible complex roots of numerical derivatives of the complex S(rl, Z(rl)), which requires an +estimation of S(rl, Z(rl)) on the complex δL plane and may give a relatively large numerical error. +In the following, we introduce an alternative strategy, which computes the solution of the effective +equation more efficiently. +Instead of introducing the partial amplitude Z, we consider the full spinfoam amplitude, which +can be written as the following integral for the same contribution as in (6.10) +A(∆2 +3) ∼ +� +dδLd241x µ(δL, x)eλ ˜S(r′,δL,x). +(7.1) +Here the external parameter r′ is just the boundary data r′ = (jb, ξeb). 
˜S(r′, δL, x) is the spinfoam +action S with j123 = j123(l12) and l12 = L0 + δL. +Recall that δLRegge +c +is a solution of the classical Regge equation. The Regge geometry with +δLRegge +c +corresponds to a pseudo-critical point of ˜S(r′, δL, x). Both ˜S(r′, δL, x) and µ(δL, x) are +analytic in the neighbourhood of this pseudo-critical point. Therefore, ˜S(r′, δL, x) and µ(δL, x) can +be analytic continued to the holomorphic functions ˜S(r′, δL, z) and µ(δL, z), where (δL, z) ∈ C242 +– 24 – + +7.18 × 10-6[S(r', SLSpinfoam +[S(r', SLSpinfoam +Maxa +Minais in a complex neighborhood of the pseudo-critical point. We fix the boundary data r′ to be the +same as the one used in Figure 7. Since r′ is a small deformation from the boundary data of the flat +geometry, the neighbourhood covers the real critical point corresponding to the flat geometry and +the boundary data before the deformation. +For each γ, we would like to numerically compute the complex critical points (δL, z) = +(δLSpinfoam +c +, ˜Z)(r′) as the solution to the following equations, +∂z ˜S(r′, δL, z) = 0, +(7.2) +∂δL ˜S(r′, δL, z) = 0. +(7.3) +Since we fix the boundary data r′ and vary γ, the complex critical points give a continuous trajectory +parametrized by γ in the complex space of (δL, z). In the numerical computation, we sample a +sequence of γ ∈ [10−9, 106] and compute the complex critical point for each γ by the Newton-Raphson +method, following the steps in (B.2) - (B.8). For any γ, the recursion of the Newton-Raphson +method can be initialized at the pseudo-critical point and give the convergent result within the +desired tolerance. Moreover, all resulting complex critical points depend smoothly on the boundary +data δl35 and reduces to the real critical point when δl35 → 0 (see Figure 13 for an example). +Figure +13. +The +red +points +are +the +list-plot +of +the +norm +of +the +complex +critical +point +(δLSpinfoam +c +, ˜Z) v.s. +the deformation of the boundary segment length δl35. +For any complex criti- +cal points (δLSpinfoam +c +, ˜Z) = (δLSpinfoam +c +, z1, z2, · · · , z241), the norm is defined as ∥(δLSpinfoam +c +, ˜Z)∥ = +����δLSpinfoam +c +��� +2 ++ |z1|2 + |z2|2 + · · · + |z241|2. +Here, the boundary segment length l35 is deformed from +the flat geometry by l35 → l35 + δl35 at γ = 10−6, δl35 ∈ [0, 10−3]. The blue point is the complex critical +point as δl35 = 10−3, and the green point is the real critical point at the origin (0, 0) corresponding to the flat +geometry. The cyan curve represents the fitted function ∥(δLSpinfoam +c +, ˜Z)∥ ≃ 1.97×106 δl35−5.49×107 (δl35)2. +The solution δL from (7.2) and (7.3) is the same as the solution of ∂δLS(rl, Z(rl)) = 0. Indeed, +0 = ∂δLS(rl, Z(rl)) = ∂S(rl, Z(rl)) +∂rl +��� +Z(rl) · ∂rl +∂δL + ∂S(rl, Z(rl)) +∂Z(rl) +��� +rl +· ∂Z(rl) +∂δL += ∂S(rl, Z(rl)) +∂rl +��� +Z(rl) · ∂rl +∂δL = [∂δLS(rl, z)]z=Z(rl) , +(7.4) +where we have used ∂S(rl, Z(rl))/∂Z(rl)|rl = 0. Z(rl) depends on δL. z = Z(rl) is the solution of +(7.2), when analytic continuing δL → δL. The result [∂δLS(rl, z)]z=Z(rl) = 0 from (7.4), followed by +analytic continuing δL → δL, is equivalent to (7.3) with the solution of (7.2) inserted. +The complex critical point gives δL ≡ δLSpinfoam +c +(γ) as a trajectory parametrized by γ in a +complex neighborhood at δL = 0. This solution is compared to the Regge solution δLRegge +c +≃ 0.000439 +– 25 – + +Figure 14. 
Figure 14. Panel (a) is the log-log plot of the distance between the spinfoam solution and the Regge solution in a neighbourhood of δL̃ = δL̃_c^{Regge} as a function of γ. Panel (b1) shows the real part of the spinfoam solution δL̃_c^{Spinfoam} versus γ. Panel (b2) is the log-log plot of the imaginary part of the spinfoam solution δL̃_c^{Spinfoam} versus γ. Panel (c1) shows the real parts of S̃(r′, δL̃, z) at the complex critical points versus γ, and the inset in (c1) is the corresponding log-log plot. Panel (c2) plots the imaginary parts of S̃(r′, δL̃, z) at the complex critical points versus γ.

This solution δL_c^{Spinfoam}(γ) is generically complex, although it is close to the real axis, especially for small γ. Figure 9(a) demonstrates the distance (in the complex plane) between the spinfoam solution δL_c^{Spinfoam}(γ) and the classical Regge solution δL_c^{Regge}:

|δL_c^{Spinfoam}(γ) − δL_c^{Regge}|.   (7.5)

This distance is small in the small-γ regime, so the classical Regge dynamics is reproduced by the spinfoam effective dynamics for small γ. This result is consistent with the comparison of the actions in Figure 8. It is also consistent with some earlier arguments in [18–21] about the semiclassical approximation of spinfoams with small γ.

Figure 15. The log-log plot of e^{λ Re[S̃(r′, δL_c^{Spinfoam}, Z̃)]} (blue curve) and e^{λ Re[S̃(r′, δL̃_c^{Spinfoam}, Z̃)]} (red curve) as functions of λ at γ = 10^{-8}.

The distance (7.5) becomes larger when γ increases. It indicates that the spinfoam amplitude with larger γ gives a larger correction to the classical Regge solution. Therefore, the effective theory in the large-γ regime differs more significantly from Regge gravity. Furthermore, the distance (7.5) stabilizes in the large-γ regime, as shown in Figure 9(a). The value at which the distance stabilizes becomes smaller when the boundary data is closer to the one for the flat geometry, as seen by comparing Figures 9(a) and (b). The small and large γ regimes might be viewed as two phases of the spinfoam amplitude. The effective dynamics is closer to the Regge dynamics for small γ but deviates more from the Regge dynamics for large γ.

The critical point (δL_c^{Spinfoam}, Z̃)(r′) is generally complex for every γ (see Figure 11). Figures 12(a) and (b) plot the analytically continued action S̃(r′, δL, z) (with the overall phase φ(γ) removed) evaluated at the complex critical points for a large number of samples of γ. The real part Re(S̃) is close to zero in both the small-γ and large-γ regimes, so e^{λS̃} in the asymptotic formula (3.8) is not suppressed at large λ for both small and large γ. The non-suppressed e^{λS̃} for small γ has been anticipated, since it can be predicted by the bound (5.2). But the non-suppressed e^{λS̃} with large λ in the large-γ regime violates the bound (5.2). This result suggests that the bound (5.2) is not universal but only valid for small or finite γ.

Figure 9(b) plots |δL_c^{Spinfoam} − δL_c^{Regge}| for different boundary data, which deform the boundary data of the flat geometry by l_35 → l_35 + 10^{-10}. This boundary data is closer to the boundary data for the flat geometry. The results are qualitatively similar to those from the previous boundary data, although the maximum of |δL_c^{Spinfoam} − δL_c^{Regge}| becomes smaller compared to the previous case. Changing the boundary data does not seem to shift the location in γ-space where the small-γ phase (where (7.5) is small) transitions to the large-γ phase (where (7.5) stabilizes), as suggested by comparing Figures 9(a) and (b).
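The γ-trajectory and the distance (7.5) can be traced by numerical continuation: sample γ on a logarithmic grid and warm-start the Newton iteration at the solution found for the previous γ. The sketch below assumes a family of gradient maps `grad_S_family(w, gamma)`; the toy family used here is hypothetical and only illustrates the bookkeeping, not the actual behaviour of the spinfoam action.

```python
import numpy as np

def newton_complex(F, w0, h=1e-8, tol=1e-10, maxiter=50):
    # Same Newton-Raphson iteration as in the previous sketch, repeated here
    # (with a forward-difference Jacobian) so that this snippet runs on its own.
    w = np.array(w0, dtype=complex)
    for _ in range(maxiter):
        F0, n = F(w), w.size
        J = np.zeros((n, n), dtype=complex)
        for k in range(n):
            dw = np.zeros(n, dtype=complex)
            dw[k] = h
            J[:, k] = (F(w + dw) - F0) / h
        step = np.linalg.solve(J, F0)
        w = w - step
        if np.linalg.norm(step) < tol:
            break
    return w

def grad_S_family_toy(w, gamma):
    # Hypothetical gamma-dependent stand-in for the gradient of S~; it only
    # illustrates the continuation bookkeeping, not the spinfoam dynamics.
    dL, z1, z2 = w
    return np.array([
        z1 - 0.3j * gamma * dL,
        z2 + 0.1 * dL ** 2,
        dL - 4.39e-4 - 0.3j * z1,
    ])

def scan_gamma(grad_family, w_seed, gammas, dL_regge):
    # Track the critical point along a gamma grid, warm-starting each Newton
    # run at the previous solution, and record the distance (7.5).
    w, distances = np.array(w_seed, dtype=complex), []
    for g in gammas:
        w = newton_complex(lambda x: grad_family(x, g), w)
        distances.append(abs(w[0] - dL_regge))
    return np.array(distances)

if __name__ == "__main__":
    gammas = np.logspace(-9, 6, 31)
    dists = scan_gamma(grad_S_family_toy, [4.39e-4, 0.0, 0.0], gammas, 4.39e-4)
    for g, d in zip(gammas[::6], dists[::6]):
        print(f"gamma = {g:9.2e}   |dL_c^Spinfoam - dL_c^Regge| = {d:.3e}")
```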
7.2 Complex critical point and the other Regge solution δL̃_c^{Regge}

Recall from Figure 7(a) that there is another classical Regge solution δL = δL̃_c^{Regge} with the boundary condition under consideration. This solution corresponds to a different pseudo-critical point, which we use as the starting point for initializing the recursion in the Newton-Raphson method. Following the same procedure discussed above, we obtain a new trajectory of complex critical points parametrized by γ. The complex critical point gives δL = δL̃_c^{Spinfoam}(γ), which is generically complex. Figure 14 plots the distance |δL̃_c^{Spinfoam}(γ) − δL̃_c^{Regge}|, the real and imaginary parts of δL̃_c^{Spinfoam}(γ), and the real and imaginary parts of the action S̃ evaluated at the complex critical points. For small γ, δL̃_c^{Spinfoam}(γ) is approximately real and close to the classical Regge solution δL̃_c^{Regge}. Increasing γ results in larger corrections of δL̃_c^{Spinfoam}(γ) to δL̃_c^{Regge}.

Both the complex critical point here, denoted by (δL̃_c^{Spinfoam}, Z̃)(r′), and the point (δL_c^{Spinfoam}, Z̃)(r′) discussed in the last subsection give contributions to A(∆_3^2). When we compare their contributions, e^{λS} is suppressed faster at the critical point here than at the one in the last subsection (see Figure 15) for fixed γ < 0.1. This relates to the fact that δL̃_c^{Regge} gives larger deficit angles. Therefore, the complex critical point here contributes to the amplitude much less than the one in the last subsection for generic small γ and large λ. Recall that δL̃_c^{Regge} likely relates to a discretization artifact. The result suggests that the spinfoam amplitude suppresses the contribution from the discretization artifact, in favor of a good continuum limit.

The complex critical points used in Figure 14 are likely beyond the stationary phase approximation (for complex actions) described above and below (3.7), because these complex critical points do not analytically relate to the real critical point (˚j_h, ˚g_ve, ˚z_vf) for the flat geometry. This relates to the existence of complex critical points with Re(S̃) > 0 in Figure 14(c1), violating (3.9). Indeed, when we continuously deform the boundary data r′ by l_35 → l_35 + δl_35 from the boundary data of the flat geometry to data that do not admit a flat geometry, the solution of (7.2) and (7.3) deforms analytically from the real critical point to the previous complex critical point (δL_c^{Spinfoam}, Z̃)(r′) (see Figure 13; a similar property holds for the complex critical points in Section 6), but not to any of the complex critical points used in Figure 14.
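The consistency check described in the preceding paragraph — switching on the boundary deformation δl_35 continuously and verifying that the critical point flows out of the real critical point, as in Figure 13 — can be organized in the same continuation style. The following sketch uses a hypothetical two-variable gradient with an explicit δl_35 dependence; the quadratic fit at the end mimics in spirit the fit quoted in the caption of Figure 13, with toy coefficients.

```python
import numpy as np

def newton_complex(F, w0, h=1e-8, tol=1e-12, maxiter=50):
    # Same Newton-Raphson iteration as in the previous sketches, repeated so
    # that this snippet is self-contained.
    w = np.array(w0, dtype=complex)
    for _ in range(maxiter):
        F0, n = F(w), w.size
        J = np.zeros((n, n), dtype=complex)
        for k in range(n):
            dw = np.zeros(n, dtype=complex)
            dw[k] = h
            J[:, k] = (F(w + dw) - F0) / h
        step = np.linalg.solve(J, F0)
        w = w - step
        if np.linalg.norm(step) < tol:
            break
    return w

def grad_S_def_toy(w, dl35):
    # Hypothetical 2-variable gradient with an explicit boundary-deformation
    # parameter dl35; at dl35 = 0 the (real) critical point sits at the origin,
    # mimicking the flat-geometry configuration.
    dL, z1 = w
    return np.array([
        z1 - 0.2j * dL - 1.0e3 * dl35,
        dL - 0.5j * z1,
    ])

def track_deformation(grad_def, w_real, deformations):
    # Follow the critical point as dl35 is switched on, warm-starting Newton
    # at the previous step, and record the norm ||(dL, z_1, ...)|| of Figure 13.
    w, norms = np.array(w_real, dtype=complex), []
    for dl in deformations:
        w = newton_complex(lambda x: grad_def(x, dl), w)
        norms.append(np.linalg.norm(w))
    return np.array(norms)

if __name__ == "__main__":
    dls = np.linspace(0.0, 1.0e-3, 21)
    norms = track_deformation(grad_S_def_toy, [0.0, 0.0], dls)
    # Quadratic fit of the norm against dl35, analogous in spirit to the fit in
    # the caption of Figure 13 (the coefficients here are toy numbers).
    quad, lin, const = np.polyfit(dls, norms, 2)
    print(f"norm ~ {lin:.3e} * dl35 + {quad:.3e} * dl35^2")
```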
The complex critical points used in Figure 14 have to be studied by the fully-fledged Picard-Lefschetz theory (see, e.g., [23, 53, 54]). Consequently, given that the spinfoam amplitude is defined on the real integration cycle where Re(S) ≤ 0, a complex critical point with Re(S̃) > 0 does not contribute to the asymptotics of the amplitude, because the steepest-ascent flow associated with this critical point turns out to have no intersection with the real integration cycle. Therefore, the contributions from the complex critical points in Figure 14 are vanishing or suppressed for finite or larger γ, where Re(S̃) > 0 or e^{λ Re(S̃)} is suppressed.

8 Conclusion and Outlook

Our analysis above demonstrates the importance of complex critical points in understanding the asymptotic behaviour of the spinfoam amplitude in the large-j regime. In the case of the 4-simplex amplitude, taking into account the complex critical point generalizes the asymptotics to non-Regge boundary data and relates to the twisted geometry. In the case of the simplicial complex, the complex critical point plays an important role in deriving the effective dynamics from the spinfoam amplitude. The effective dynamics closely relates to Regge gravity in the small-γ regime, as demonstrated by the numerical computation for the amplitude on the double-∆_3 complex.

Our work provides a general procedure to derive the effective theory in the large-j regime. From the perspective of semiclassical analysis, our numerical computation should be generalized to triangulations larger than the double-∆_3, which have more internal segments. One should check whether Regge gravity can still be reproduced by the large-j effective dynamics on larger triangulations.

The effective dynamics in LQG has been primarily investigated in the context of symmetry-reduced models, such as Loop Quantum Cosmology (LQC) and black holes; see, e.g., [55, 56]. The effective dynamics is useful in deriving the singularity resolution. Our result shows that the spinfoam amplitude also results in a certain effective dynamics. However, this effective dynamics is in terms of the discrete Regge geometry, in contrast to the effective dynamics in terms of smooth fields in LQC and black-hole models. Research in progress aims to understand whether the effective dynamics from the spinfoam amplitude can relate to LQC and black holes. If the relation exists, it might provide a new approach toward embedding LQC and black-hole models in the full theory of LQG.

It is also interesting to investigate the behavior of the effective dynamics under lattice refinement of spinfoam amplitudes. The Regge geometries approach the continuum limit under the refinement, so we expect that the effective dynamics of Regge geometries from spinfoams should reduce to a certain effective dynamics of smooth geometry.

Acknowledgments

The authors acknowledge helpful communications with Bianca Dittrich, Carlo Rovelli, and Simone Speziale. M.H. receives support from the National Science Foundation through grants PHY-1912278 and PHY-2207763, and the sponsorship provided by the Alexander von Humboldt Foundation during his visit to FAU Erlangen-Nürnberg. In addition, M.H. acknowledges IQG at FAU Erlangen-Nürnberg, IGC at Penn State University, Perimeter Institute for Theoretical Physics, and the University of Western Ontario for their hospitality during his visits. Research at Perimeter Institute is supported in part by the Government of Canada through the Department of Innovation, Science and Economic Development and by the Province of Ontario through the Ministry of Colleges and Universities.
+A +Boundary data for single 4-simplex +In Section 3, we introduce the real critical points of the 4-simplex, which corresponds to the Regge +geometry. We construct the Regge boundary geometry, Table 2, 3 and 4 record areas ˚af = γ˚jf, 3d +normals ˚nef and the corresponding spinors ˚ξef of the single 4-simplex. +Table 2. Each cell shows the area of the face shared by line number tetrahedra and column number +tetrahedra. +e +˚af +e′ +e′ +1 +e′ +2 +e′ +3 +e′ +4 +e′ +5 +e1 +5 +5 +e2 +2 +2 +e3 +5 +2 +e4 +2 +2 +e5 +5 +2 +Table 3. Each cell shows the 3d normal vectors of the face shared by line number tetrahedra and column +number tetrahedra. +e +˚nef +e′ +e′ +1 +e′ +2 +e′ +3 +e′ +4 +e′ +5 +e1 +(1.00, 0, 0) +(-0.333, -0.943, 0) +(-0.333, 0.471, -0.816) +(-0.333, 0.471, 0.816) +e2 +(0.938, 0, -0.346) +(-0.782, -0.553, 0.289) +(-0.948, 0.276, -0.160) +(-0.616, 0.276, 0.738) +e3 +(-0.313, -0.884, -0.346) +(0.782, 0.553, 0.289) +(0.0553, 0.986, -0.160) +(-0.0553, 0.673, 0.738) +e4 +(-0.244, 0.345, -0.907) +(0.739, -0.215, 0.639) +(-0.0431, -0.768, 0.639) +(-0.0862, 0.122, 0.989) +e5 +(-0.436, 0.617, 0.655) +(0.859, -0.385, -0.338) +(0.0771, -0.938, -0.338) +(0.154, -0.218, -0.964) +Table 4. Each cell shows a spinor ξef corresponding to a 3-normal to the face. +e +˚ξef +e′ +e′ +1 +e′ +2 +e′ +3 +e′ +4 +e′ +5 +e1 +(0.707, -0.707) +(0.707, -0.236 - 0.667i) +(0.953, 0.175 - 0.247i) +(0.953, -0.175 + 0.247i) +e2 +(0.820, -0.572) +(0.803, -0.487 - 0.344i) +(0.762,0.622 - 0.181i) +(0.932, -0.330 + 0.148i) +e3 +(0.572, -0.273 - 0.774i) +(0.596, -0.655 - 0.463i) +(0.648, 0.043 + 0.761i) +(0.362, 0.076 - 0.929i) +e4 +(0.976, 0.125 - 0.177 i) +(0.905, 0.408 - 0.119 i) +(0.425, 0.051 + 0.904i) +(0.997, -0.0432 + 0.0611i) +e5 +(0.910, -0.240 + 0.339 i) +(0.818, -0.525 + 0.236i) +(0.576, 0.067 - 0.815 i) +(0.991, -0.0778 + 0.1100) +Table 5 and 6 record the values of the real critical point ˚gve and ˚zvf for the 4-simplex with the +boundary data (˚jf,˚ξef). +All the Regge boundary data ˚r = (˚jf,˚ξef) and the data of the real critical point (˚gve,˚zvf) for +the 4-simplex amplitude can be found in the Mathematica notebook [57]. +– 29 – + +Table 5. Each cell of the table is the critical point of ˚gve. +e +e1 +e2 +e3 +e4 +e5 +˚gve +� 0 +−i +−i 0 +� +� 0 +−1.03i +−0.969i −0.358i +� +� 0 +−1.03i +−0.969i 0.337 + 0.119i +� +� 0 +−1.17i +−0.855i −0.149 + 0.105i +� +� 0 +−0.874i +−1.14i −0.199 + 0.141i +� +Table 6. Each cell shows the critical points of ˚zvf +e +˚zvf +e′ +e1 +e2 +e3 +e4 +e5 +e1 +(1,-1) +(1.00, 1.82 + 2.57i) +e2 +(1.00, −0.915 + 0.402i) +(1.00, −1.41 − 0.31i) +e3 +(1.00, −0.333 + 0.943i) +(1.00, 0.086 − 0.690i) +e4 +(1.00, 1.86 + 0.99i) +(1.00, 5.72 + 8.08i) +e5 +(1.00, −1.82 − 2.57i) +(1.00, 0.071 + 0.470i) +B +The Newton-Raphson method +The Newton-Raphson method for the single-variable equation f(x) = 0 is initialized with a starting +point x0, and then one iterate +xn+1 = xn − f (xn) +f ′ (xn), +(B.1) +to approach the solution with higher accuracy. In single 4-simplex case as an example, the equations +of motion is 44 dimensions, we denote by +F +� +� +� +� +�� +z1 +... +z44 +� +�� +� +� +� = +� +�� +f1(z1, ..., z44) +... +f44(z1, ..., z44). +� +�� +(B.2) +The derivative of this system is the 44×44 Jacobian given by: +J(z1, ..., z44) = +� +�� +∂f1 +∂z1 ... +∂f1 +∂z44 +... +... +... +∂f44 +∂z1 ... ∂f44 +∂z44 +� +�� +(B.3) +We define the function G by +G(z) = z − J(z)−1F(z). 
+(B.4) +The functional Newton-Raphson method for nonlinear systems is the iteration procedure that evolves +from the initial z(0), which in our case is the real critical point ˚x, and generates +z(k) = G +� +z(k−1)� += z(k−1) − J +� +z(k−1)�−1 +F +� +z(k−1)� +, +k ≥ 1. +(B.5) +We can write this as +� +��� +z(k) +1 +... +z(k) +44 +� +��� = +� +��� +z(k−1) +1 +... +z(k−1) +44 +� +��� + +� +��� +∆z(k−1) +1 +... +∆z(k−1) +44 +� +��� , +(B.6) +– 30 – + +where +� +��� +∆z(k−1) +1 +... +∆z(k−1) +44 +� +��� = −J +� +z(k−1)�−1 +F +� +z(k−1)� +. +(B.7) +We set the desired tolerance ϵ = 10−100, and we stop after n iterations when +����(∆z(n−1) +1 +)2 + · · · + (∆z(n−1) +44 +)2 +��� < ϵ +(B.8) +The resulting z(n) is the approximated solution within the tolerance. We evaluate the analytic +continued 4-simplex action S at z(n) and apply it to the asymptotic formula (3.8). +C +Boundary data for the ∆2 +3 complex +C.1 +Boundary data and the real critical point for the flat ∆2 +3 complex +We construct the flat geometry with the segment lengths in Table 1. The corresponding boundary +data for flat geometry is shown in Table 7, 8, 9, 10, 11 and 12. Here, the area af and the spins jf +satisfy af = γjf. +Table 7. Boundary data (˚ab,˚ξeb) for the 4-simplex v1 = {1, 2, 3, 4, 6} +e +˚ξeb +e′ +e′ +1 +e′ +2 +e′ +3 +e′ +4 +e′ +5 +e1 +(-0.41 + 0.73i, -0.15 - 0.52i) +e2 +(-0.61 + 0.22i, -0.76i) +e3 +(-0.078 - 0.033i, 0.04 - 1.0i) +e4 +(0.60, -0.66 - 0.46i) +(0.76, -0.04 - 0.65i) +e5 +(0.43, -0.18 - 0.88i) +(0.95, -0.03 + 0.31i) +e +˚ab +e’ +e′ +1 +e′ +2 +e′ +3 +e′ +4 +e′ +5 +e1 +0.75 +e2 +5 +e3 +0.55 +e4 +2 +2 +e5 +2.8 +2.0 +Table 8. Boundary data (˚ab,˚ξeb) for the 4-simplex v2 = {1, 2, 3, 5, 6} +e +˚ξeb +e′ +e′ +2 +e′ +6 +e′ +7 +e′ +8 +e′ +9 +e2 +(-0.72 + 0.13 i, 0.02 - 0.68 i) +e6 +(0.81 i, -0.59i) +e7 +(-0.27 - 0.19i, -0.94i) +e8 +(0.71, -0.24 - 0.67 i) +(0.95, -0.17 + 0.25 i) +e9 +(0.74, -0.67 + 0.05i) +(1.0, 0.048 - 0.068i) +e +˚ab +e′ +e′ +2 +e′ +6 +e′ +7 +e′ +8 +e′ +9 +e2 +2.8 +e6 +5 +e7 +5 +e8 +5 +5 +e9 +2.6 +3.2 +Table 9. Boundary data (˚ab,˚ξeb) for the 4-simplex v3 = {1, 2, 4, 5, 6} +e +˚ξeb +e′ +e′ +3 +e′ +7 +e′ +10 +e′ +11 +e′ +12 +e3 +(-0.22 - 0.03 i, 0.07 - 0.97 i) +e7 +(-0.10 - 0.073i, -0.99i) +e10 +(0.18 + 0.98 i, 0.065 - 0.11 i) +e11 +(0.98, 0.12 - 0.18i) +(0.43, -0.87 + 0.25i) +e12 +(0.99, -0.01 - 0.17i) +(1.0, -0.018 + 0.025 i) +e +˚ab +e′ +e′ +3 +e′ +7 +e′ +10 +e′ +11 +e′ +12 +e3 +2 +e7 +3.2 +e10 +0.69 +e11 +5 +2 +e12 +0.55 +2 +Table 12. Boundary data (˚ab,˚ξeb) for the 4-simplex v6 = {1, 2, 4, 5, 7} +e +˚ξeb +e′ +e′ +10 +e′ +14 +e′ +17 +e′ +20 +e′ +21 +e10 +(0.20 + 0.91 i, 0.07 - 0.35 i) +e14 +(-0.55 + 0.68 i, -0.16 - 0.46 i) +e17 +e20 +(0.76, 0.22 - 0.61 i) +(0.74, 0.57 - 0.36 i) +(0.85, 0.52 - 0.1 i) +e21 +(0.95, -0.31 + 0.07 i) +(0.39, 0.89 - 0.23 i) +e +˚ab +e′ +e′ +10 +e′ +14 +e′ +17 +e′ +20 +e′ +21 +e10 +2 +e14 +0.5 +e17 +e20 +2.1 +5.4 +2.4 +e21 +0.69 +3.5 +– 31 – + +Table 10. Boundary data (˚ab,˚ξeb) for the 4-simplex v4 = {1, 2, 3, 4, 7} +e +˚ξeb +e′ +e′ +1 +e′ +13 +e′ +14 +e′ +15 +e′ +16 +e1 +(-0.33 + 0.75 i, -0.11 - 0.56 i) +e13 +(-0.52 + 0.71 i, -0.35 - 0.32 i) +e14 +(-0.59 + 0.71 i, -0.18 - 0.35 i) +e15 +(0.90, -0.14 - 0.41 i) +(0.63, 0.33 + 0.71 i) +e16 +(0.94, -0.25 - 0.22 i) +(0.94, 0.28 - 0.18i) +e +˚ab +e′ +e′ +1 +e′ +13 +e′ +14 +e′ +15 +e′ +16 +e1 +2 +e13 +3.2 +e14 +2.1 +e15 +5.6 +2.3 +e16 +0.75 +0.5 +Table 11. 
Boundary data (˚ab,˚ξeb) for the 4-simplex v5 = {1, 2, 3, 5, 7} +e +˚ξeb +e′ +e′ +6 +e′ +13 +e′ +17 +e′ +18 +e′ +19 +e6 +(0.04 + 0.77 i, 0.01 - 0.63 i) +e13 +(-0.48 + 0.71 i, -0.31 - 0.41 i) +e17 +(-0.19 + 0.17 i, -0.18 - 0.95 i) +(-0.05 + 0.25 i, -0.06 - 0.97 i) +e18 +(0.90, -0.43) +e19 +(0.71, -0.26 - 0.65 i) +(0.95, 0.19 + 0.25 i) +e +˚ab +e′ +e′ +6 +e′ +13 +e′ +17 +e′ +18 +e′ +19 +e6 +2.6 +e13 +5.6 +e17 +5.4 +3.5 +e18 +5 +e19 +3.2 +5.2 +Once the flat geometry is constructed, the real critical points +� +˚jh,˚gve,˚zvf +� +can be obtained by +solving the critical equations Eq.(3.1) and (3.2). The solution of the critical point equations relates +to the Lorentzian Regge geometry, as described in [8, 9]. ˚gve relates to the Lorentzian transformation +acting on each tetrahedron and glueing them together to form the ∆2 +3 complex. In this model, we fix +gve to be constant SL(2, C) matrices for v1e5, v2e9, v3e12, v4e16, v5e19, v6e21. The group elements gve +for the bulk tetrahedra v1e1, v1e2, v2e6, v2e7, v3e3, v3e10, v4e13, v5e17, v6e14 are fixed to be the upper +triangular matrix. For the ∆2 +3 triangulation, there are five internal faces h(12k) with k = 3, 4, 5, 6, 7. +The areas of these internal faces are shown in Table C.1. The numerical results of the real critical +Table 13. Areas of internal faces h in ∆2 +3 complex. +ah(123) +ah(124) +ah(125) +ah(126) +ah(127) +0.971 +0.333 +1.55 +1.78 +1.93 +point (˚gve, ˚zvf) corresponding to the flat geometry are listed in Table 14, 15, 16, 17, 18 and 19. +Table 14. The real critical point (˚gve, ˚zvf) for the 4-simplex v1 = (1, 2, 3, 4, 6) +e +e1 +e2 +e3 +˚gv1e +�0.96 0.42 + 0.04i +0 +1 +� +�0.99 −0.05 − 0.15i +0 +1 +� +�0.77 −0.13 − 0.72i +0 +1.3 +� +e +e4 +e5 +˚gv1e +� +0 +−1.0i +−0.97i 0.34 + 0.12i +� +� +0 +−1.1i +−0.91i 0.46 + 0.12i +� +e +|˚zv1f⟩ +e′ +e′ +1 +e′ +2 +e′ +3 +e′ +4 +e′ +5 +e1 +(1,-0.94 + 0.69i) +(1,-0.82 + 0.45i) +e2 +(1,0.87 - 0.49i) +(1,-0.33 + 0.94i) +e3 +(1,-0.1 + 1.5i) +(1,2.5 + 6.0i) +e4 +(1,-0.92 + 0.40i) +(1,0.3 + 2.1i) +e5 +(1,-0.14 + 0.75i) +(1,0.2 - 1.4i) +Table 15. The real critical point (˚gve, ˚zvf) for the 4-simplex v2 = (1, 2, 3, 5, 6). +e +e2 +e6 +e7 +˚gv2e +�0.99 −0.05 − 0.15i +0.99 −0.05 − 0.15i +� +�0.98 0.32 +0 +1 +� +�1.0 −0.031 + 0.044i +0 +0.96 +� +e +e8 +e9 +˚gv2e +� +0 +−1.0i +−1.0i +0 +� +� +1.26 +0.09 − 0.13i +0.09 + 0.13i +0.82 +� +e +|˚zv2f⟩ +e′ +e′ +2 +e′ +6 +e′ +7 +e′ +8 +e′ +9 +e2 +(1,-0.1 + 1.5 i) +(1,-0.14 + 0.75i) +e6 +(1,0.87 - 0.49i) +(1, 0.87 - 0.49i) +e7 +(1,-0.86 - 0.07i) +(1,1.8 + 2.6i) +e8 +(1,-0.33 + 0.94i) +(1,-1.8 - 2.6 i) +e9 +(1,-1.09 - 0.05i) +(1,4.9 + 7.0 i) +Table 18. The real critical point (˚gve, ˚zvf) for the 4-simplex v5 = (1, 2, 3, 5, 7). +e +e6 +e13 +e17 +˚gv5e +�0.98 0.32 +0 +1 +� +�0.84 0.82 + 0.19i +0 +1.2 +� +�0.84 0.73 − 0.05i +0 +1.2 +� +e +e18 +e19 +˚gv5e +� +0 +−1.1i +−0.88i −0.72i +� +� +0 +−1.2i +−0.86i 0.03 − 0.72i +� +e +|˚zv5f⟩ +e′ +e′ +6 +e′ +13 +e′ +17 +e′ +18 +e′ +19 +e6 +(1,-0.86 - 0.07i) +(1,-1.09 - 0.05i) +e13 +(1,0.87 - 0.49i) +(1,-0.83 + 0.56i) +e17 +(1,-0.92 + 0.75i) +(1,1,-3.2 + 0.6i) +e18 +(1,-1) +(1,-1.9 + 2.2i) +e19 +(1,-0.73 + 0.54i) +(1,-1.8 - 0.8 i) +– 32 – + +Table 16. The real critical point (˚gve, ˚zvf) for the 4-simplex v3 = (1, 2, 4, 5, 6). 
+e +e3 +e7 +e10 +˚gv3e +�0.77 −0.13 − 0.72i +0 +1.3 +� +�1.0 −0.031 + 0.044i +0 +0.96 +� +�0.96 0.38 +0 +1 +� +e +e11 +e12 +˚gv3e +� +0 +−1.2i +−0.86i −0.15 + 0.11i +� +� +0 +−1.8i +−0.55i −0.16 + 0.12i +� +e +|˚zv3f⟩ +e′ +e′ +3 +e′ +7 +e′ +10 +e′ +11 +e′ +12 +e3 +(1,-0.94 + 0.69i) +(1,0.3 + 2.1i) +e7 +(1,-0.1 + 1.5i) +(1, 4.9 + 7.0i) +e10 +(1,-0.86 - 0.07i) +(1,-0.45 - 0.08i) +e11 +(1,1.8 + 2.6i) +(1,-0.68 - 0.15i) +e12 +(1,2.5 + 6.0i) +(1,5.7 + 8.1 i) +Table 17. The real critical point (˚gve, ˚zvf) for the 4-simplex v4 = (1, 2, 3, 4, 7). +e +e1 +e13 +e14 +˚gv4e +�0.96 0.42 + 0.04i +0 +1 +� +�0.84 0.82 + 0.19i +0 +1.2 +� +�0.68 1.3 + 0.9i +0 +1.5 +� +e +e15 +e16 +˚gv4e +� +0 +−1.3i +−0.79i −0.34 − 0.92i +� +� +0 +−1.3i +−0.77i −0.49 − 1.01i +� +e +|˚zv4f⟩ +e′ +e′ +1 +e′ +13 +e′ +14 +e′ +15 +e′ +16 +e1 +(1,0.87 - 0.49i) +(1,-0.92 + 0.40 i) +e13 +(1,-0.92 + 0.75i) +(1, -0.73 + 0.54i) +e14 +(1,-0.94 + 0.69i) +(1,-0.94 + 0.77i) +e15 +(1,-0.83 + 0.56i) +(1,-1.1 - 1.2i) +e16 +(1,-0.82 + 0.45i) +(1,-1.0 + 0.81i) +Table 19. The real critical point (˚gve, ˚zvf) for the 4-simplex v6 = (1, 2, 4, 5, 7). +e +e10 +e14 +e17 +˚gv6e +�0.96, 0.38 +0 +1 +� +�0.68 1.3 + 0.9i +0 +1.5 +� +�0.84 0.73 − 0.05i +0 +1.2 +� +e +e20 +e21 +˚gv6e +� +0 +−1.1i +−0.93i 0.17 − 0.96i +� +� +0 +−1.2i +−0.84i 0.4 − 2.3i +� +e +|˚zv6f⟩ +e′ +e′ +10 +e′ +14 +e′ +17 +e′ +20 +e′ +21 +e10 +(1,-0.94 + 0.69i) +(1,-0.68 - 0.15i) +e14 +(1,-0.92 + 0.75i) +(1,-1+0.81i) +e17 +(1,-0.86 - 0.07i) +(1,-1.9+2.2i) +e20 +(1,-0.94 + 0.77i) +(1,-2.7 - 0.4i) +e21 +(1,-0.45 - 0.08i) +(1,-3.2+0.6i) +All the boundary data ˚r = (˚jb,˚ξeb) and the data of the real critical point (˚jh,˚gve,˚zvf) can be +found in the Mathematica notebook in [57]. +C.2 +Boundary data and the pseudo critical points for the curved ∆2 +3 complex +The boundary data in Appendix C.1 admits a flat geometry. To construct a curved geometry, we +deform the segment length l35 → l35 +10−3 and keep the other boundary segment lengths unchanged. +We list the boundary data for this curved geometry in Table 20, 21, 22, 23, 24 and 25 as the internal +segment length is l12 = L0 + δLRegge +c +. +Table 20. Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v1 = {1, 2, 3, 4, 6} +e +ξeb +e′ +e′ +1 +e′ +2 +e′ +3 +e′ +4 +e′ +5 +e1 +(-0.40 + 0.73i, -0.15 - 0.53i) +e2 +(-0.61 + 0.22i, - 0.76i) +e3 +(-0.079 - 0.033i, 0.04 - 1.0i) +e4 +(0.60, -0.66 - 0.46i) +(0.76, -0.04 - 0.65i) +e5 +(0.43, -0.18 - 0.88i) +(0.95, -0.03 + 0.31i) +e +ab +e’ +e′ +1 +e′ +2 +e′ +3 +e′ +4 +e′ +5 +e1 +0.75 +e2 +5 +e3 +0.55 +e4 +2 +2 +e5 +2.8 +2.0 +Table 21. Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v2 = {1, 2, 3, 5, 6} +e +ξeb +e′ +e′ +2 +e′ +6 +e′ +7 +e′ +8 +e′ +9 +e2 +(-0.71 + 0.13i, 0.02 - 0.69i) +e6 +(0.81 i, -0.59i) +e7 +(-0.27 - 0.19i, -0.94i) +e8 +(0.71, -0.24 - 0.67 i) +(0.95, -0.17 + 0.25 i) +e9 +(0.74, -0.67 + 0.05i) +(1.0, 0.049 - 0.065i) +e +ab +e′ +e′ +2 +e′ +6 +e′ +7 +e′ +8 +e′ +9 +e2 +2.8 +e6 +5 +e7 +5 +e8 +5 +5 +e9 +2.6 +3.2 +Table 24. Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v5 = {1, 2, 3, 5, 7} +e +ξeb +e′ +e′ +6 +e′ +13 +e′ +17 +e′ +18 +e′ +19 +e6 +(0.04 + 0.77 i, 0.01 - 0.64 i) +e13 +(-0.48 + 0.71 i, -0.31 - 0.41 i) +e17 +(-0.19 + 0.17 i, -0.18 - 0.95 i) +(-0.05 + 0.25 i, -0.05 - 0.97 i) +e18 +(0.90, -0.43) +e19 +(0.71, -0.26 - 0.66 i) +(0.95, 0.19 + 0.24 i) +e +ab +e′ +e′ +6 +e′ +13 +e′ +17 +e′ +18 +e′ +19 +e6 +2.6 +e13 +5.6 +e17 +5.4 +3.5 +e18 +5 +e19 +3.2 +5.2 +– 33 – + +Table 22. 
Boundary data (ab, ξeb) of curved geometry for the 4-simplex v3 = {1, 2, 4, 5, 6} +e +ξeb +e′ +e′ +3 +e′ +7 +e′ +10 +e′ +11 +e′ +12 +e3 +(-0.22 - 0.03 i, 0.07 - 0.97 i) +e7 +(-0.105 - 0.072i, -0.99i) +e10 +(0.18 + 0.98 i, 0.065 - 0.106 i) +e11 +(0.98, 0.12 - 0.18i) +(0.43, -0.87 + 0.25i) +e12 +(0.99, -0.01 - 0.17i) +(1.0, -0.018 + 0.025 i) +e +ab +e′ +e′ +3 +e′ +7 +e′ +10 +e′ +11 +e′ +12 +e3 +2.0 +e7 +3.2 +e10 +0.69 +e11 +5 +2 +e12 +0.55 +2 +Table 23. Boundary data (ab, ξeb) of curved geometry for the 4-simplex v4 = {1, 2, 3, 4, 7} +e +ξeb +e′ +e′ +1 +e′ +13 +e′ +14 +e′ +15 +e′ +16 +e1 +(-0.33 + 0.75 i, -0.12 - 0.57 i) +e13 +(-0.52 + 0.71 i, -0.35 - 0.32 i) +e14 +(-0.58 + 0.71 i, -0.19 - 0.35 i) +e15 +(0.90, -0.14 - 0.41 i) +(0.63, 0.33 + 0.71 i) +e16 +(0.94, -0.25 - 0.22 i) +(0.94, 0.28 - 0.18i) +e +ab +e′ +e′ +1 +e′ +13 +e′ +14 +e′ +15 +e′ +16 +e1 +2 +e13 +3.2 +e14 +2.1 +e15 +5.6 +2.3 +e16 +0.75 +0.5 +Table 25. Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v6 = {1, 2, 4, 5, 7} +e +ξeb +e′ +e′ +10 +e′ +14 +e′ +17 +e′ +20 +e′ +21 +e10 +(0.20 + 0.91 i, 0.07 - 0.35 i) +e14 +(-0.55 + 0.68 i, -0.16 - 0.47 i) +e17 +e20 +(0.76, 0.22 - 0.61 i) +(0.74, 0.57 - 0.36 i) +(0.85, 0.52 - 0.1 i) +e21 +(0.95, -0.31 + 0.07 i) +(0.39, 0.89 - 0.23 i) +e +ab +e′ +e′ +10 +e′ +14 +e′ +17 +e′ +20 +e′ +21 +e10 +2 +e14 +0.5 +e17 +e20 +2.1 +5.4 +2.4 +e21 +0.69 +3.5 +The curved geometry does not have real critical point. However, we can find the pseudo-critical +point (j0 +h, g0 +ve, z0 +vf), which is close to the real critical point inside the real integration domain. The +pseudo-critical point satisfies the critical equation (3.1) but violates critical equation (3.2). The +data for the pseudo-critical point is listed in Table 26, 27, 28, 29, 30 and 31. +Table 26. The pseudo-critical point (g0 +ve, z0 +vf) for the 4-simplex v1 = (1, 2, 3, 4, 6) +e +e1 +e2 +e3 +g0 +v1e +�0.96 0.40 + 0.02i +0 +1 +� +�0.99 −0.06 − 0.16i +0 +1 +� +� +0.78 +−0.12 − 0.71i +−0.00024 − 0.00065i +1.29 +� +e +e4 +e5 +g0 +v1e +�−0.0016 − 0.0001i +−1.0i +−0.97i +0.34 + 0.12i +� +� +0 +−1.1i +−0.91i 0.46 + 0.12i +� +e +|z0 +v1f⟩ +e′ +e′ +1 +e′ +2 +e′ +3 +e′ +4 +e′ +5 +e1 +(1,-0.95 + 0.70i) +(1,-0.82 + 0.45i) +e2 +(1, 0.87 - 0.50i) +(1,-0.34 + 0.95i) +e3 +(1,-0.1 + 1.5i) +(1,2.5 + 6.0i) +e4 +(1,-0.92 + 0.40i) +(1,0.3 + 2.1i) +e5 +(1,-0.14 + 0.75i) +(1,0.2 - 1.4i) +Table 27. The pseudo-critical point (g0 +ve, z0 +vf) for the 4-simplex v2 = (1, 2, 3, 5, 6). +e +e2 +e6 +e7 +g0 +v2e +� +0.99 +−0.05 − 0.15i +0.0024 − 0.0112i +1.01 +� +� +0.98 0.30 +0 +1 +� +� +1.0 −0.029 + 0.048i +0 +0.97 +� +e +e8 +e9 +g0 +v2e +� +0.0008 + 0.00056i +−1.0i +−1.0i +−0.0054 − 0.0011i +� +� +0 +−0.98i +−1.0i −0.029 + 0.016i +� +e +|z0 +v2f⟩ +e′ +e′ +2 +e′ +6 +e′ +7 +e′ +8 +e′ +9 +e2 +(1,-0.1 + 1.5 i) +(1,-0.14 + 0.75i) +e6 +(1,0.87 - 0.48i) +(1, -1) +e7 +(1,-0.86 - 0.07i) +(1,1.8 + 2.6i) +e8 +(1,-0.33 + 0.94i) +(1,-1.8 - 2.6 i) +e9 +(1,-1.09 - 0.05i) +(1,4.7 + 6.9i) +Table 30. The pseudo-critical point (g0 +ve, z0 +vf) for the 4-simplex v5 = (1, 2, 3, 5, 7). 
+e +e6 +e13 +e17 +g0 +v5e +� +0.98 +0.32 +0.011 + 0.006i 1.03 +� +� +0.84 +0.82 + 0.19i +−0.0012 + 0.011i +1.19 +� +�0.84 0.73 − 0.05i +0 +1.2 +� +e +e18 +e19 +g0 +v5e +� +−0.00066 + 0.00052 −1.1i +−0.88i +−0.72i +� +� +0 +−1.2i +−0.86i 0.03 − 0.72i +� +e +|z0 +v5f⟩ +e′ +e′ +6 +e′ +13 +e′ +17 +e′ +18 +e′ +19 +e6 +(1,-0.86 - 0.07i) +(1,-1.09 - 0.06i) +e13 +(1,0.87 - 0.50i) +(1,-0.83 + 0.56i) +e17 +(1,-0.93 + 0.75i) +(1,1,-3.2 + 0.6i) +e18 +(1,-1) +(1,-2 + 2.2i) +e19 +(1,-0.73 + 0.54i) +(1,-1.8 - 0.8 i) +The boundary data for the curved geometry and the corresponding pseudo-critical point can be +found in Mathematica notebook [57]. +– 34 – + +Table 28. The real critical point (g0 +ve, z0 +vf) for the 4-simplex v3 = (1, 2, 4, 5, 6). +e +e3 +e7 +e10 +g0 +v3e +�0.78 −0.13 − 0.72i +0 +1.29 +� +� +1.04 +−0.030 + 0.046i +−0.0010 + 0.0018i +0.96 +� +�0.96 0.38 +0 +1 +� +e +e11 +e12 +g0 +v3e +�−0.00013 − 0.0001i +−1.2i +−0.85i +−0.15 + 0.11i +� +� +0 +−1.8i +−0.55i −0.16 + 0.12i +� +e +|z0 +v3f⟩ +e′ +e′ +3 +e′ +7 +e′ +10 +e′ +11 +e′ +12 +e3 +(1,-0.94 + 0.69i) +(1,0.3 + 2.1i) +e7 +(1,-0.1 + 1.5i) +(1, 4.9 + 7.0i) +e10 +(1,-0.86 - 0.07i) +(1,-0.45 - 0.08i) +e11 +(1,1.8 + 2.6i) +(1,-0.68 - 0.15i) +e12 +(1,2.5 + 6.0i) +(1,5.7 + 8.1 i) +Table 29. The pseudo-critical point (g0 +ve, z0 +vf) for the 4-simplex v4 = (1, 2, 3, 4, 7). +e +e1 +e13 +e14 +g0 +v4e +� +0.96 +0.42 + 0.04i +0.02 − 0.02i +1.05 +� +�0.84 0.82 + 0.2i +0 +1.2 +� +� +0.68 +1.3 + 0.9i +−0.0023 + 0.0038i 1.5 + 0.01i +� +e +e15 +e16 +g0 +v4e +�0.0032 − 0.0015i +−1.3i +−0.79i +−0.34 − 0.92i +� +� +0 +−1.3i +−0.77i −0.49 − 1.01i +� +e +|z0 +v4f⟩ +e′ +e′ +1 +e′ +13 +e′ +14 +e′ +15 +e′ +16 +e1 +(1,0.88 - 0.46i) +(1,-0.91 + 0.40 i) +e13 +(1,-0.92 + 0.75i) +(1, -0.73 + 0.54i) +e14 +(1,-0.94 + 0.68i) +(1,-0.94 + 0.77i) +e15 +(1,-0.83 + 0.56i) +(1,-1.1 - 1.2i) +e16 +(1,-0.82 + 0.45i) +(1,-1.0 + 0.81i) +Table 31. The pseudo-critical point (g0 +ve, z0 +vf) for the 4-simplex v6 = (1, 2, 4, 5, 7). +e +e10 +e14 +e17 +g0 +v6e +� +0.96, 0.38 +0.00077 + 0.00070i 1.05 +� +�0.68 1.3 + 0.9i +0 +1.5 +� +� +0.83 +0.73 − 0.05i +−0.0014 − 0.0019i +1.2 +� +e +e20 +e21 +g0 +v6e +� +−0.00019 − 0.00100i +−1.1i +−0.93i +0.17 − 0.96i +� +� +0 +−1.2i +−0.84i 0.4 − 2.3i +� +e +|z0 +v6f⟩ +e′ +e′ +10 +e′ +14 +e′ +17 +e′ +20 +e′ +21 +e10 +(1,-0.94 + 0.68i) +(1,-0.68 - 0.15i) +e14 +(1,-0.92 + 0.75i) +(1,-1+0.81i) +e17 +(1,-0.86 - 0.07i) +(1,-1.9+2.2i) +e20 +(1,-0.94 + 0.77i) +(1,-2.7 - 0.4i) +e21 +(1,-0.45 - 0.08i) +(1,-3.2+0.6i) +D +Regge Action +Let’s first recall the volume of the simplex. The volume formula for the Lorentzian 4-simplex σ is +given by [58, 59] +Vσ = (−1)4 +24(4!)2 det(Cσ) +(D.1) +where Vσ is the volume square and det(Cσ) is the Cayley–Menger determinant. The Cayley–Menger +matrix Cσ is the 6 × 6 matrix with entries l2 +ij for i, j = 0, · · · , 4, where lij is the segment length. +The Cayley–Menger matrix is augmented by an additional row and column with entries given by +(Cσ)5,5 = 0 and (Cσ)i,5 = (Cσ)5,j = 1. That is +Cσ = +� +l2 +ij 1i +1j 0 +� +(D.2) +Similarly, the volume formula of the Euclidean tetrahedron is given by +Vτ = (−1)3+1 +23(3!)2 det(Cτ) +(D.3) +here, Cτ is the Cayley–Menger matrix for the tetrahedron, which is a 5 × 5 matrix defined similarly +as the above. +Given ⃗a and⃗b as timelike normal vector of two tetrahedra τa, τb of the 4-simplex σ, the Lorentzian +dihedral angles are [60, 61] +θt(σ) = sgn(⃗a ·⃗b) cosh−1 +� +sgn(⃗a ·⃗b) ⃗a ·⃗b +|⃗a||⃗b| +� +, +sgn(⃗a ·⃗b) = +� +(⃗a ·⃗b)2 +⃗a ·⃗b +. 
+(D.4) +In the 4-dimentional triangulation, the hinge of the angle is a triangle denoted by t. Given a triangle +t, it is shared by τa and τb, and s¯t is the length square of the segment opposite to the triangle t in σ. +For example, in the 4-simplex σ = (12345), the tetrahedra τa = (1234) and τb = (1235) share the +– 35 – + +triangle t = (123). Then ¯t is the segment (45). The dihedral angles w.r.t t are given by [62] +θt(σ) = +�� +1 +Vt +∂Vσ +∂s¯t +�2 +1 +Vt +∂Vσ +∂s¯t +cosh−1 +� +� +� +� +�� +1 +Vt +∂Vσ +∂s¯t +�2 +1 +Vt +∂Vσ +∂s¯t +32·42 +Vt +∂Vσ +∂s¯t +� +32 Vτa +Vt +� +32 Vτb +Vt +� +� +� +� +(D.5) +Here, V are volume square (Vt = a2 +t is the area square) and s is length square. As we only consider +the space-like triangles and tetrahedra, so all the volume square are positive. The above formula +can be simplified as +θt(σ) = +�� +1 +Vt +∂Vσ +∂s¯t +�2 +1 +Vt +∂Vσ +∂s¯t +cosh−1 +� +� +� +� +42 +�� +1 +Vt +∂Vσ +∂s¯t +�2 +� +Vτa +� +Vτb +� +� +� +� . +(D.6) +Here, the volume of 4-simplex, tetrahedra and areas of triangles can be computed by following +Eq.(D.1) and Eq.(D.3). Given any simplicial complex K, Regge action can be defined as +SRegge = +� +σ⊂K +� +t⊂σ +atθt(σ), +(D.7) +where at are the areas of the triangles t and θt is the dihedral angle of triangle t. +References +[1] E. Witten, A New Look At The Path Integral Of Quantum Mechanics, arXiv:1009.6032. +[2] E. Witten, Analytic Continuation Of Chern-Simons Theory, AMS/IP Stud. Adv. Math. 50 (2011) +347–446, [arXiv:1001.2933]. +[3] G. Basar, G. V. Dunne, and M. Unsal, Resurgence theory, ghost-instantons, and analytic continuation +of path integrals, JHEP 10 (2013) 041, [arXiv:1308.1108]. +[4] G. V. Dunne and M. Unsal, Deconstructing zero: resurgence, supersymmetry and complex saddles, +JHEP 12 (2016) 002, [arXiv:1609.05770]. +[5] M. Cristoforetti, F. Di Renzo, A. Mukherjee, and L. Scorzato, Quantum field theories on the Lefschetz +thimble, PoS LATTICE2013 (2014) 197, [arXiv:1312.1052]. +[6] E. Witten, A Note On Complex Spacetime Metrics, arXiv:2111.06514. +[7] F. Conrady and L. Freidel, On the semiclassical limit of 4d spin foam models, Phys. Rev. D78 (2008) +104023, [arXiv:0809.2280]. +[8] J. W. Barrett, R. J. Dowdall, W. J. Fairbairn, F. Hellmann, and R. Pereira, Lorentzian spin foam +amplitudes: Graphical calculus and asymptotics, Class. Quant. Grav. 27 (2010) 165009, +[arXiv:0907.2440]. +[9] M. Han and M. Zhang, Asymptotics of Spinfoam Amplitude on Simplicial Manifold: Lorentzian +Theory, Class. Quant. Grav. 30 (2013) 165012, [arXiv:1109.0499]. +[10] E. Bianchi, E. Magliaro, and C. Perini, LQG propagator from the new spin foams, Nucl. Phys. B 822 +(2009) 245–269, [arXiv:0905.4082]. +[11] M. Han, On Spinfoam Models in Large Spin Regime, Class. Quant. Grav. 31 (2014) 015004, +[arXiv:1304.5627]. +– 36 – + +[12] M. Han, Z. Huang, H. Liu, and D. Qu, Complex critical points and curved geometries in +four-dimensional Lorentzian spinfoam quantum gravity, Phys. Rev. D 106 (2022), no. 4 044005, +[arXiv:2110.10670]. +[13] S. K. Asante, B. Dittrich, and H. M. Haggard, Effective Spin Foam Models for Four-Dimensional +Quantum Gravity, Phys. Rev. Lett. 125 (2020), no. 23 231301, [arXiv:2004.07013]. +[14] M. Han and H. Liu, Analytic continuation of spinfoam models, Phys. Rev. D 105 (2022), no. 2 024012, +[arXiv:2104.06902]. +[15] E. Bianchi, L. Modesto, C. Rovelli, and S. Speziale, Graviton propagator in loop quantum gravity, +Class.Quant.Grav. 23 (2006) 6989–7028, [gr-qc/0604044]. +[16] E. Bianchi, E. Magliaro, and C. 
Perini, LQG propagator from the new spin foams, Nucl.Phys. B822 +(2009) 245–269, [arXiv:0905.4082]. +[17] E. Bianchi and Y. Ding, Lorentzian spinfoam propagator, Phys.Rev. D86 (2012) 104040, +[arXiv:1109.6538]. +[18] E. Magliaro and C. Perini, Emergence of gravity from spinfoams, EPL 95 (2011), no. 3 30007, +[arXiv:1108.2258]. +[19] C. Perini, Einstein-Regge equations in spinfoams, J. Phys. Conf. Ser. 360 (2012) 012050, +[arXiv:1110.5899]. +[20] E. Magliaro and C. Perini, Regge gravity from spinfoams, Int. J. Mod. Phys. D 22 (2013) 1–21, +[arXiv:1105.0216]. +[21] M. Han, Semiclassical Analysis of Spinfoam Model with a Small Barbero-Immirzi Parameter, Phys. +Rev. D 88 (2013) 044051, [arXiv:1304.5628]. +[22] P. Dona, M. Han, and H. Liu, Spinfoams and high performance computing, arXiv:2212.14396. +[23] M. Han, Z. Huang, H. Liu, D. Qu, and Y. Wan, Spinfoam on Lefschetz Thimble: Markov Chain +Monte-Carlo Computation of Lorentzian Spinfoam Propagator, arXiv:2012.11515. +[24] M. Han, Z. Huang, H. Liu, and D. Qu, Numerical computations of next-to-leading order corrections in +spinfoam large-j asymptotics, Phys. Rev. D 102 (2020), no. 12 124010, [arXiv:2007.01998]. +[25] F. Gozzini, A high-performance code for EPRL spin foam amplitudes, Class. Quant. Grav. 38 (2021), +no. 22 225010, [arXiv:2107.13952]. +[26] P. Frisoni, F. Gozzini, and F. Vidotto, Markov Chain Monte Carlo methods for graph refinement in +Spinfoam Cosmology, arXiv:2207.02881. +[27] P. Dona and P. Frisoni, How-to Compute EPRL Spin Foam Amplitudes, Universe 8 (2022), no. 4 208, +[arXiv:2202.04360]. +[28] S. K. Asante, B. Dittrich, and J. Padua-Arguelles, Effective spin foam models for Lorentzian quantum +gravity, Class. Quant. Grav. 38 (2021), no. 19 195002, [arXiv:2104.00485]. +[29] S. K. Asante, J. D. Sim˜ao, and S. Steinhaus, Spin-foams as semi-classical vertices: gluing constraints +and a hybrid algorithm, arXiv:2206.13540. +[30] S. K. Asante, B. Dittrich, and S. Steinhaus, Spin foams, Refinement limit and Renormalization, +arXiv:2211.09578. +[31] B. Bahr and S. Steinhaus, Numerical evidence for a phase transition in 4d spin foam quantum gravity, +Phys. Rev. Lett. 117 (2016), no. 14 141302, [arXiv:1605.07649]. +[32] C. Rovelli and L. Smolin, Discreteness of area and volume in quantum gravity, Nuclear Physics B 442 +(May, 1995) 593–619. +[33] A. Ashtekar and J. Lewandowski, Quantum theory of geometry. 1: Area operators, Class.Quant.Grav. +14 (1997) A55–A82, [gr-qc/9602046]. +– 37 – + +[34] M. Han and T. Krajewski, Path Integral Representation of Lorentzian Spinfoam Model, Asymptotics, +and Simplicial Geometries, Class. Quant. Grav. 31 (2014) 015009, [arXiv:1304.5626]. +[35] V. Bonzom, Spin foam models for quantum gravity from lattice path integrals, Phys. Rev. D 80 (2009) +064028, [arXiv:0905.1501]. +[36] J. Engle, W. Kaminski, and J. Oliveira, Addendum to ‘eprl/fk asymptotics and the flatness problem’, +Classical and Quantum Gravity 38 (2021), no. 11 119401. +[37] M. Han, Z. Huang, and A. Zipfel, Spin foam propagator: A new perspective to include the cosmological +constant, Phys. Rev. D 97 (2018), no. 8 084055, [arXiv:1711.11162]. +[38] M. Han, Einstein Equation from Covariant Loop Quantum Gravity in Semiclassical Continuum Limit, +Phys. Rev. D 96 (2017), no. 2 024047, [arXiv:1705.09030]. +[39] B. Dittrich and A. Kogios, From spin foams to area metric dynamics to gravitons, arXiv:2203.02409. +[40] J. W. Barrett, M. Rocek, and R. M. Williams, A Note on area variables in Regge calculus, Class. +Quant. Grav. 
16 (1999) 1373–1376, [gr-qc/9710056]. +[41] A. Melin and J. Sj¨ostrand, Fourier integral operators with complex-valued phase functions, in Fourier +Integral Operators and Partial Differential Equations (J. Chazarain, ed.), (Berlin, Heidelberg), +pp. 120–223, Springer Berlin Heidelberg, 1975. +[42] L. Hormander, The Analysis of Linear Partial Differential Operators I, ch. Chapter 7, p. Theorem +7.7.5. Springer-Verlag Berlin, 1983. +[43] F. Conrady and L. Freidel, Path integral representation of spin foam models of 4d gravity, Class. +Quant. Grav. 25 (2008) 245010, [arXiv:0806.4640]. +[44] J. W. Barrett, R. J. Dowdall, W. J. Fairbairn, H. Gomes, and F. Hellmann, Asymptotic analysis of the +EPRL four-simplex amplitude, J. Math. Phys. 50 (2009) 112504, [arXiv:0902.1170]. +[45] M.-X. Han and M. Zhang, Asymptotics of Spinfoam Amplitude on Simplicial Manifold: Euclidean +Theory, Class. Quant. Grav. 29 (2012) 165004, [arXiv:1109.0500]. +[46] P. Dona, M. Fanizza, G. Sarno, and S. Speziale, Numerical study of the Lorentzian +Engle-Pereira-Rovelli-Livine spin foam amplitude, Phys. Rev. D100 (2019), no. 10 106003, +[arXiv:1903.12624]. +[47] M. Kapovich and J. J. Millson, The symplectic geometry of polygons in Euclidean space, Journal of +Differential Geometry 44 (1996), no. 3 479 – 513. +[48] C. Rovelli and S. Speziale, A Semiclassical tetrahedron, Class. Quant. Grav. 23 (2006) 5861–5870, +[gr-qc/0606074]. +[49] P. Dona and S. Speziale, Asymptotics of lowest unitary SL(2,C) invariants on graphs, Phys. Rev. D +102 (2020), no. 8 086016, [arXiv:2007.09089]. +[50] S. K. Asante, B. Dittrich, and H. M. Haggard, Discrete gravity dynamics from effective spin foams, +Class. Quant. Grav. 38 (2021), no. 14 145023, [arXiv:2011.14468]. +[51] H. Liu. https://github.com/LQG-Florida-Atlantic-University/extended_spinfoam, 2021. +[52] D. Qu. https://github.com/dqu2017/ +4-simplex-amplitude-and-effective-dynamics-on-double-Delta3-complex, 2021. +[53] S. Lefschetz, The Picard-Lefschetz Theory, pp. 135–148. Springer New York, New York, NY, 1975. +[54] A. Alexandru, G. Basar, P. F. Bedaque, and N. C. Warrington, Complex Paths Around The Sign +Problem, arXiv:2007.05436. +[55] A. Ashtekar, T. Pawlowski, and P. Singh, Quantum Nature of the Big Bang: Improved dynamics, Phys. +Rev. D74 (2006) 084003, [gr-qc/0607039]. +– 38 – + +[56] A. Ashtekar, J. Olmedo, and P. Singh, Quantum Transfiguration of Kruskal Black Holes, Phys. Rev. +Lett. 121 (2018), no. 24 241301, [arXiv:1806.00648]. +[57] D. Qu. https://github.com/dqu2017/Numerical-Asymtotics, 2020. +[58] K. Tate and M. Visser, Realizability of the Lorentzian (n,1)-Simplex, JHEP 01 (2012) 028, +[arXiv:1110.5694]. +[59] K. Tate and M. Visser, Fixed-topology lorentzian triangulations: Quantum regge calculus in the +lorentzian domain, Journal of High Energy Physics 2011 (2011) 1–32. +[60] S. K. Asante, B. Dittrich, and J. Padua-Arg¨uelles, Complex actions and causality violations: +Applications to Lorentzian quantum cosmology, arXiv:2112.15387. +[61] B. Dittrich, S. Gielen, and S. Schander, Lorentzian quantum cosmology goes simplicial, Class. Quant. +Grav. 39 (2022), no. 3 035012, [arXiv:2109.00875]. +[62] B. Dittrich, L. Freidel, and S. Speziale, Linearized dynamics from the 4-simplex Regge action, Phys. +Rev. D 76 (2007) 104020, [arXiv:0707.4513]. 
+– 39 – + diff --git a/SNE1T4oBgHgl3EQfHwN5/content/tmp_files/load_file.txt b/SNE1T4oBgHgl3EQfHwN5/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..4e25a4331a16a1ff1072ec66d881a874b88863ee --- /dev/null +++ b/SNE1T4oBgHgl3EQfHwN5/content/tmp_files/load_file.txt @@ -0,0 +1,2544 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf,len=2543 +page_content='Prepared for submission to JHEP Complex critical points in Lorentzian spinfoam quantum gravity: 4-simplex amplitude and effective dynamics on double-∆3 complex Muxin Han1,2 Hongguang Liu2 Dongxue Qu3,1 1Department of Physics, Florida Atlantic University, 777 Glades Road, Boca Raton, FL 33431-0991, USA 2Department Physik, Institut f¨ur Quantengravitation, Theoretische Physik III, Friedrich-Alexander Univer- sit¨at Erlangen-N¨urnberg, Staudtstr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 7/B2, 91058 Erlangen, Germany 3Perimeter Institute for Theoretical Physics, 31 Caroline St N, Waterloo, ON N2L 2Y5, Canada E-mail: hanm(AT)fau.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='edu, hongguang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='liu(AT)gravity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='fau.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='de, dqu(AT)perimeterinstitute.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='ca Abstract: The complex critical points are analyzed in the 4-dimensional Lorentzian Engle-Pereira- Rovelli-Livine (EPRL) spinfoam model in the large-j regime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' For the 4-simplex amplitude, taking into account the complex critical point generalizes the large-j asymptotics to the situation with non-Regge boundary data and relates to the twisted geometry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' For generic simplicial complexes, we present a general procedure to derive the effective theory of Regge geometries from the spinfoam amplitude in the large-j regime by using the complex critical points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The effective theory is analyzed in detail for the spinfoam amplitude on the double-∆3 simplicial complex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' We numerically compute the effective action and the solution of the effective equation of motion on the double-∆3 complex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The effective theory reproduces the classical Regge gravity when the Barbero-Immirzi parameter γ is small.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='02930v1 [gr-qc] 7 Jan 2023 Contents 1 Introduction 1 2 Spinfoam amplitude 3 3 Complex critical point and effective dynamics 6 4 Four-simplex amplitude 9 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1 The amplitude and parametrization of variables 10 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2 Deviating from the shape-matching 12 5 Revisit the ∆3 amplitude 14 6 Double-∆3 amplitude and effective action 16 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1 Some setups 16 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2 Numerical computing the effective action 19 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='3 Comparing to Regge action 21 7 Solutions of effective dynamics on double-∆3 23 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1 Spinfoam complex critical point and the Regge solution δLRegge c 23 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2 Complex critical point and the other Regge solution δ�LRegge c 27 8 Conclusion and Outlook 28 A Boundary data for single 4-simplex 29 B The Newton-Raphson method 30 C Boundary data for the ∆2 3 complex 31 C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1 Boundary data and the real critical point for the flat ∆2 3 complex 31 C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2 Boundary data and the pseudo critical points for the curved ∆2 3 complex 33 D Regge Action 35 1 Introduction The perturbative expansion is widely used in quantum theory to make approximate predictions order by order in certain parameter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The method of perturbative expansion is well-connected to the path integral formulation, whose stationary phase approximation results in the semiclassical expansion in ℏ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' By the stationary phase approximation, the path integral is approximately computed by the dominant contribution from the critical point and neighborhood.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The critical point is the solution of the equation of motion, which is obtained from variating the action in the path integral.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Given a path integral in terms of real variables, traditionally, the semiclassical expansion only takes into account critical points inside the real integration cycle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' However, the recent progress in many research areas demonstrates that the complex critical point generically away from the real integration cycle plays a crucial role in the semiclassical expansion of the path integral (see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [1–6]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The – 1 – complex critical point is the critical point of the analytically continued path integral, where the integrand is analytically extended to the complexification of the real integration cycle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The method of stationary phase approximation has been applied extensively to the spinfoam amplitude in Loop Quantum Gravity (LQG) (see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [7–11]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The importance of the complex critical point has been demonstrated in the recent progress in the semiclassical analysis of spinfoam amplitude [12–14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' A key result is that the semiclassical curved spacetime geometry can only emerge from the complex critical point of the spinfoam amplitude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Taking into account the complex critical point provides the resolution to the long-standing “flatness problem”, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=', the problem of discovering only the flat spacetime geometry in the spinfoam amplitude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' This problem turns out to be the confusion from ignoring the complex critical point.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The present work continues from the earlier work [12] and further study the complex critical points and their implications in spinfoam amplitude.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The discussion in this work focuses on the 4-dimensional Lorentzian Engle-Pereira-Rovelli-Livine (EPRL) spinfoam model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Our results demonstrate the impact of the complex critical points mainly from two perspectives: At the level of one 4-simplex amplitude, taking into account the complex critical point generalizes the large-j asymptotics by Barrett et al [8] to the case of non-Regge boundary data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The geometry of the non-Regge boundary data gives the boundary tetrahedra that are glued only with area-matching but without shape-matching, in contrast to the Regge boundary data that requires the shape-matching condition (as well as the orientation matching condition) and determines the Regge boundary geometry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The generalized 4-simplex amplitude asymptotic behavior depends analytically on the boundary data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' This analytic dependence is not manifest in the original asymptotic formula in [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The computation of the generalized asymptotic behavior relies on the numerical method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The discussion in Section 4 provides the general algorithm of computing the complex critical point of the amplitude, and demonstrates the numerical results of the asymptotics for a 1-parameter family of non-Regge boundary data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Based on the application of complex critical points, we develop a formalism to derive the effective theory of Regge geometry from the large-j spinfoam amplitude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' As the result, given a simplicial complex K with M internal segments, the spinfoam amplitude A(K) with Regge boundary data reduces to the integral over the internal line-segment lengths lI, I = 1, · · · , M, A(K) ∼ � M � I=1 dµ(lI) eλS(⃗l) [1 + O(1/λ)] , λ ≫ 1, (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1) within the neighborhood of the integration domain of A(K).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' λ is the scaling parameter of spins jf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' eλS(⃗l) with the effective action S(⃗l) comes from evaluating the analytically continued integrand of A(K) at the complex critical point, which depend analytically on lI.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The integral in (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1) reduced from A(K) is over the Regge geometries with the fixed boundary condition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The equation of motion ∂lIS(⃗l) = 0 gives the effective dynamics of Regge geometry implied by the spinfoam amplitude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The formalism of deriving the effective theory is discussed in Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' In Sections 6 and 7, we apply the formalism to the double-∆3 simplicial complex, which contains only a single internal segment, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=', M = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The complex critical points and the effective action S(⃗l) are computed numerically following the general algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The spinfoam amplitude depends on the Barbero-Immirzi parameter γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The computations are performed for many different values of the Barbero-Immirzi parameter γ, ranging from small to large.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The resulting S(⃗l) are compared with the Regge action on the double-∆3 complex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' S(⃗l) is well-approximated by the classical Regge action in the small-γ regime, and S(⃗l) provides the correction to the Regge action with increasing γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The solutions of the effective dynamics are computed numerically for different values of γ and compared to the solution of Regge – 2 – equation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The solution from S(⃗l) well-approximates the Regge solution for small γ and gives larger correction when increasing γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Recovering the classical Regge action and solution from the effective dynamics of spinfoam amplitude gives evidence of the semiclassical consistency of spinfoam quantum gravity.' 
Recovering classical Regge gravity from the spinfoam amplitude with small γ has been argued earlier in [15–21]. Our numerical result confirms this property for the spinfoam amplitude on the double-∆3 complex. The numerical computations are performed for different γ's ranging from small to large. Fixing the boundary data, the solutions of the effective dynamics trace out a trajectory in the space of Regge geometries parametrized by γ. The trajectory approaches the solution of the classical Regge equation for small γ, as mentioned above. For large γ, the trajectory stabilizes at a Regge geometry different from the classical Regge solution, which suggests that the effective theory for large γ differs significantly from Regge gravity. The solutions at both small and large γ give non-suppressed contributions to the spinfoam amplitude. In particular, the solutions for large γ violate the known bound |γδ_h| ≲ λ^{-1/2} [11–13] (δ_h is the deficit angle of the Regge geometry), which is valid for non-suppressed contributions to the amplitude with finite and small γ. Studying the complex critical points in the spinfoam amplitude relates closely to the recent progress in numerical studies of spinfoam amplitudes [22]. Given the complexity of the spinfoam amplitude, the complex critical point and the corresponding contribution to the spinfoam amplitude have to be computed numerically. The numerical analysis of complex critical points connects to the Lefschetz-thimble and Monte-Carlo computations for the spinfoam integral [23], because every complex critical point is associated with an integration cycle known as a Lefschetz thimble, and the integral on the Lefschetz thimble collects all contributions associated with that complex critical point.
Another related numerical result is the semiclassical expansion of the spinfoam amplitude to next-to-leading order via the stationary phase approximation [24]. We would also like to mention a few other numerical approaches to spinfoam quantum gravity, including the “sl2cfoam-next” code for the non-perturbative computation of the spinfoam amplitude [25–27], the effective spinfoam model [13, 28], the hybrid algorithm [29], and spinfoam renormalization [30, 31], etc.

This paper is organized as follows: Section 2 gives a brief review of the integral representation of the EPRL spinfoam amplitude and the definition of the large-j regime. In Section 3, we define the real and complex critical points and discuss the general formalism for deriving the effective dynamics of Regge geometry. Section 4 studies the complex critical point of the 4-simplex amplitude and generalizes the large-j asymptotics to include non-Regge boundary data. Section 5 revisits the known results on the spinfoam amplitude on the ∆3 complex as preparation for analyzing the amplitude on the double-∆3 complex. Section 6 discusses the complex critical point in the spinfoam amplitude on the double-∆3 complex and computes the effective action. Section 7 discusses the numerical solution of the effective dynamics on the double-∆3 complex. In Section 8, we conclude and discuss some outlooks.

2 Spinfoam amplitude

A 4-dimensional simplicial complex 𝒦 contains 4-simplices v, tetrahedra e, triangles f, line segments, and points. The internal and boundary triangles are denoted by h and b (f is either h or b). The SU(2) spins j_h, j_b ∈ ℕ₀/2 are assigned to the internal and boundary triangles h, b. The spins label the quanta of triangle areas.
The LQG area spectrum indicates that the quantum area of a triangle f is a_f = 8πγGℏ √(j_f(j_f+1)) [32, 33]. In the large-j regime, which we focus on, the area spectrum gives a_f ≃ 8πγGℏ j_f, or a_f ≃ γ j_f in units where 8πGℏ = 1.

The Lorentzian EPRL spinfoam amplitude on 𝒦 is given by summing over the internal spins {j_h}:

    A(\mathcal{K}) = \sum_{\{j_h\}} \prod_h d_{j_h} \int [\mathrm{d}g\,\mathrm{d}z]\; e^{S(j_h, g_{ve}, z_{vf};\, j_b, \xi_{eb})},    (2.1)

    [\mathrm{d}g\,\mathrm{d}z] = \prod_{(v,e)} \mathrm{d}g_{ve} \prod_{(v,f)} \mathrm{d}\Omega_{z_{vf}},    (2.2)

where d_{j_h} = 2j_h + 1. The boundary states are SU(2) coherent states |j_b, ξ_{eb}⟩, where ξ_{eb} = u_{eb}(1, 0)^T with u_{eb} ∈ SU(2); j_b and ξ_{eb} are determined by the area and the 3-normal of the boundary triangle b. The summed/integrated variables are g_{ve} ∈ SL(2,ℂ), z_{vf} ∈ ℂP¹, and j_h. dg_{ve} is the Haar measure on SL(2,ℂ),

    \mathrm{d}g = \frac{\mathrm{d}\beta\,\mathrm{d}\beta^{*}\,\mathrm{d}\gamma\,\mathrm{d}\gamma^{*}\,\mathrm{d}\delta\,\mathrm{d}\delta^{*}}{|\delta|^{2}}, \qquad \forall\, g = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \in \mathrm{SL}(2,\mathbb{C}),    (2.3)

and dΩ_{z_{vf}} is the scaling-invariant measure on ℂP¹:

    \mathrm{d}\Omega_{z_{vf}} = \frac{i}{2}\, \frac{(z_0\,\mathrm{d}z_1 - z_1\,\mathrm{d}z_0) \wedge (\bar z_0\,\mathrm{d}\bar z_1 - \bar z_1\,\mathrm{d}\bar z_0)}{\langle Z_{vef}, Z_{vef}\rangle\, \langle Z_{ve'f}, Z_{ve'f}\rangle}, \qquad \forall\, z_{vf} = (z_0, z_1)^{T},    (2.4)

where Z_{vef} = g_{ve}^† z_{vf}, ⟨·,·⟩ is the Hermitian inner product on ℂ², and z_{vf} is a 2-component spinor for the face f. The spinfoam action S in Eq. (2.1) is complex and linear in j_h, j_b, with an expression of the form [34]

    S = \sum_{(e',h)} j_h F_{(e',h)} + \sum_{(e,b)} j_b F^{\mathrm{in/out}}_{(e,b)} + \sum_{(e',b)} j_b F^{\mathrm{in/out}}_{(e',b)},    (2.5)

    F^{\mathrm{out}}_{(e,b)} = 2 \ln \frac{\langle Z_{veb}, \xi_{eb}\rangle}{\lVert Z_{veb}\rVert} + i\gamma \ln \lVert Z_{veb}\rVert^{2},    (2.6)

    F^{\mathrm{in}}_{(e,b)} = 2 \ln \frac{\langle \xi_{eb}, Z_{v'eb}\rangle}{\lVert Z_{v'eb}\rVert} - i\gamma \ln \lVert Z_{v'eb}\rVert^{2},    (2.7)

    F_{(e',f)} = 2 \ln \frac{\langle Z_{ve'f}, Z_{v'e'f}\rangle}{\lVert Z_{ve'f}\rVert\, \lVert Z_{v'e'f}\rVert} + i\gamma \ln \frac{\lVert Z_{ve'f}\rVert^{2}}{\lVert Z_{v'e'f}\rVert^{2}}.    (2.8)

Here, e and e' are boundary and internal tetrahedra, respectively. In the dual complex 𝒦*, the orientation of ∂f* is outgoing from the vertex dual to v and incoming to the other vertex dual to v', and the orientation of the face f* dual to f induces the orientation of ∂f*. All logarithms in the spinfoam action are fixed to their principal values. The derivation of the spinfoam action S is given in [34].

The spinfoam amplitude in the formulation (2.1) has the following three types of continuous gauge degrees of freedom, so some gauge fixings are needed to remove the redundancies. Firstly, there is an SL(2,ℂ) gauge transformation at each v:

    g_{ve} \mapsto x_v^{-1} g_{ve}, \qquad z_{vf} \mapsto x_v^{\dagger} z_{vf}, \qquad x_v \in \mathrm{SL}(2,\mathbb{C}).    (2.9)

To remove this gauge freedom, we fix one g_{ve} to be a constant SL(2,ℂ) matrix for each 4-simplex; the amplitude is independent of the choice of constant matrices. Secondly, there is an SU(2) gauge transformation on each internal e:

    g_{v'e} \mapsto g_{v'e} h_e^{-1}, \qquad g_{ve} \mapsto g_{ve} h_e^{-1}, \qquad h_e \in \mathrm{SU}(2).    (2.10)

To fix this SU(2) gauge freedom, one can parameterize one of the two SL(2,ℂ) elements g_{ve}, g_{v'e} by an upper triangular matrix

    k = \begin{pmatrix} \lambda^{-1} & \mu \\ 0 & \lambda \end{pmatrix}, \qquad \lambda \in \mathbb{R}\setminus\{0\}, \quad \mu \in \mathbb{C}.    (2.11)

Here, we use the fact that any g ∈ SL(2,ℂ) can be decomposed as g = kh, with h ∈ SU(2) and k an upper triangular matrix of the form (2.11); a numerical sketch of this decomposition is given below. Thirdly, for each z_{vf} there is a scaling gauge freedom:

    z_{vf} \mapsto \lambda_{vf}\, z_{vf}, \qquad \lambda_{vf} \in \mathbb{C}.    (2.12)

We fix this gauge by setting the first component of z_{vf} to 1, i.e., z_{vf} = (1, α_{vf})^T with α_{vf} ∈ ℂ.
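The g = kh decomposition used in the second gauge fixing above can be made explicit numerically. The following is a minimal sketch (not code from the paper; the function name and the random test element are our own choices): given g ∈ SL(2,ℂ), the SU(2) factor h is read off from the normalized second row of g, and k = g h^{-1} then comes out upper triangular with real positive λ, as in (2.11).

```python
import numpy as np

def kh_decompose(g):
    """Split g in SL(2,C) as g = k h, with h in SU(2) and
    k = [[1/lam, mu], [0, lam]] upper triangular, lam real > 0.
    The second row of g equals lam times the second row of h."""
    row2 = g[1, :]
    lam = np.linalg.norm(row2)                 # lam = |second row of g| > 0
    # h in SU(2) has the form [[a, b], [-conj(b), conj(a)]];
    # matching its second row to row2/lam fixes a and b.
    a = np.conj(row2[1]) / lam
    b = -np.conj(row2[0]) / lam
    h = np.array([[a, b], [-np.conj(b), np.conj(a)]])
    k = g @ h.conj().T                          # k = g h^{-1}, with h^{-1} = h^dagger
    return k, h

# random SL(2,C) element: normalize a random complex 2x2 matrix to det = 1
rng = np.random.default_rng(0)
m = rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2))
g = m / np.sqrt(np.linalg.det(m))

k, h = kh_decompose(g)
print("lower-left entry of k ~ 0:", abs(k[1, 0]))
print("det k, det h ~ 1:", np.linalg.det(k), np.linalg.det(h))
print("h unitary:", np.allclose(h.conj().T @ h, np.eye(2)))
print("g = k h:", np.allclose(k @ h, g))
```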
Besides these gauge fixings, in Eq. (2.1) we assume the summation over the internal spins j_h ∈ ℕ₀/2 is bounded by j_max. In some situations j_max is determined by the boundary spins j_b via the triangle inequality; otherwise j_max is imposed as a cut-off to regularize the infinite sum over spins. To prepare for the stationary phase analysis, we would like to change the summation over j_h in Eq. (2.1) into integrals. The idea is to apply the Poisson summation formula. Firstly, we replace each d_{j_h} by a smooth compactly supported function τ_{[-ε, j_max+ε]}(j_h) satisfying τ_{[-ε, j_max+ε]}(j_h) = d_{j_h} for j_h ∈ [0, j_max] and τ_{[-ε, j_max+ε]}(j_h) = 0 for j_h ∉ [-ε, j_max+ε], for any 0 < ε < 1/2. This replacement does not change the value of the amplitude A(𝒦), but it makes the summand of Σ_{j_h} smooth and compactly supported in j_h. Then, by applying the Poisson summation formula,

    \sum_{n\in\mathbb{Z}} f(n) = \sum_{k\in\mathbb{Z}} \int_{\mathbb{R}} \mathrm{d}n\, f(n)\, e^{2\pi i k n},

the discrete summation over j_h in Eq. (2.1) becomes a sum of integrals:

    A(\mathcal{K}) = \sum_{\{k_h\in\mathbb{Z}\}} \int \prod_h \mathrm{d}j_h \prod_h 2\,\tau_{[-\epsilon,\, j_{\max}+\epsilon]}(j_h) \int [\mathrm{d}g\,\mathrm{d}z]\; e^{S^{(k)}},    (2.13)

    S^{(k)} = S + 4\pi i \sum_h j_h k_h.    (2.14)

By the area spectrum, a classical area a_f and small ℏ imply a large spin j_f ≫ 1. This motivates understanding the large-j regime as the semiclassical regime of A(𝒦). To probe the semiclassical regime, we scale both the boundary spins j_b and the internal spin cut-off j_max uniformly by

    j_b \to \lambda j_b, \qquad j_{\max} \to \lambda j_{\max}, \qquad \lambda \gg 1,    (2.15)

so that S → λS as a result of S being linear in j_b, j_h. As a consequence, the spinfoam amplitude A(𝒦) in the large-j regime is

    A(\mathcal{K}) = \sum_{\{k_h\in\mathbb{Z}\}} \int_{\mathbb{R}} \prod_h \mathrm{d}j_h \prod_h 2\lambda\,\tau_{[-\epsilon,\, \lambda j_{\max}+\epsilon]}(\lambda j_h) \int [\mathrm{d}g\,\mathrm{d}z]\; e^{\lambda S^{(k)}},    (2.16)

    S^{(k)} = S + 4\pi i \sum_h j_h k_h,    (2.17)

by the change of integration variables j_h → λ j_h, with j_h now continuous.

3 Complex critical point and effective dynamics

The integral in (2.16) at each k_h can be analyzed with the stationary phase method in the regime λ ≫ 1. By the standard argument of the stationary phase approximation, for fixed boundary data the integral with λ ≫ 1 is approximated by the dominant contributions from the solutions of the critical equations and their neighborhoods. In the case of the integrals in (2.16), the critical equations are

    \mathrm{Re}(S) = \partial_{g_{ve}} S = \partial_{z_{vf}} S = 0,    (3.1)

    \partial_{j_h} S = 4\pi i k_h, \qquad k_h \in \mathbb{Z}.    (3.2)

The solutions inside the integration domain are denoted by {˚j_h, ˚g_{ve}, ˚z_{vf}}. The integration domain is viewed as a real manifold, and the integration variables are the real and imaginary parts of the matrix elements of g_{ve} and z_{vf}; we accordingly call {˚j_h, ˚g_{ve}, ˚z_{vf}} the real critical point. The existence of a real critical point in (2.16) depends on the boundary condition, and a real critical point may not exist for generic boundary conditions: S is a complex action of n real variables x, and ∂_x S = 0 gives n complex, hence 2n real, equations, which is over-constrained for n real variables. Consequently, the critical equations (3.1) and (3.2), coupled with the additional equation Re(S) = 0, generically admit no real solution, except for special boundary conditions. To resolve this problem of over-constrained equations, the integration variables have to be complexified, and the action S has to be analytically continued to the complex variables z. We are only interested in the part of the integration domain where the spinfoam action S is analytic. The analytically continued action is denoted by 𝒮.
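The counting argument above can be seen in a one-dimensional toy model (our own illustration, not the spinfoam action): for a complex action of one real variable with one external parameter r, the two real conditions Re(S) = 0 and ∂_x S = 0 have no common real solution unless r takes a special value, while the complexified critical equation always has a solution off the real axis.

```python
import numpy as np

r = 0.3                                   # external parameter (plays the role of boundary data)
S  = lambda z: -0.5 * z**2 + 1j * r * z   # toy complex action of one variable
dS = lambda z: -z + 1j * r                # its derivative

# on the real line: Re S = 0 and dS = 0 are two real conditions for one real unknown
xs = np.linspace(-5, 5, 100001)
obstruction = np.abs(S(xs).real) + np.abs(dS(xs))
print("min over real x of |Re S| + |dS|:", obstruction.min())   # stays away from 0 for r != 0

# after complexification there is exactly one critical point, off the real axis
Z = 1j * r                                # solves dS(Z) = 0
print("complex critical point Z =", Z, " S(Z) =", S(Z))          # Re S(Z) = -r^2/2 < 0
```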
On the space of complex variables, the complex critical equation ∂_z𝒮 = 0 is no longer over-constrained, because it gives n complex equations for n complex variables. The condition Re(S) = 0 is dropped when we study 𝒮 instead of S. In the space of complex variables, the solutions of ∂_z𝒮 = 0 are called complex critical points; they play the dominant role in the asymptotics of A(𝒦) when a real critical point is absent.

Before discussing the complex critical points, let us first review some known results from the critical equations (3.1) and (3.2) with boundary data corresponding to a Regge geometry on ∂𝒦. The real solutions of the subset (3.1) have been well studied in the literature [7–9, 34]; we call these solutions the pseudo-critical points. As one of the results, a pseudo-critical point satisfying a nondegeneracy condition endows 𝒦 with a Regge geometry carrying certain 4-simplex orientations. When focusing on the pseudo-critical points that endow all 4-simplices with the uniform orientation, further imposing (3.2) on them gives the accidental flatness constraint on their corresponding Regge geometries, i.e., every deficit angle δ_h hinged by an internal triangle h satisfies [11, 35]

    \gamma \delta_h = 4\pi k_h, \qquad k_h \in \mathbb{Z}.    (3.3)

When k_h = 0, the deficit angle δ_h at every internal triangle vanishes, and the Regge geometry endowed by the real critical point is flat.
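As a quick illustration of how restrictive (3.3) is (a back-of-the-envelope sketch of ours, not a computation from the paper), one can list the deficit angles it allows for a representative small Barbero-Immirzi parameter, keeping only deficit angles of moderate size as an illustrative cut:

```python
import numpy as np

gamma = 0.1                               # a representative small Barbero-Immirzi parameter
ks = np.arange(-5, 6)                     # a few integers k_h
deltas = 4 * np.pi * ks / gamma           # deficit angles allowed by gamma * delta_h = 4*pi*k_h
# keep only deficit angles of moderate size (an illustrative cut, not a theorem)
print(deltas[np.abs(deltas) <= np.pi])    # -> [0.]: for small gamma only the flat case survives
```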
Eq. (3.3) is a strong constraint on the geometries allowed by the spinfoam and can be satisfied only for special boundary conditions that admit a flat bulk geometry (mod 4πℤ). The accidental flatness constraint is consistent with the above argument about over-constrained equations, and it has been demonstrated explicitly in well-studied examples, e.g., in [12, 36]. If one only considered the real critical point as the dominant contribution to A(𝒦), Eq. (3.3) would imply that only the flat geometry (mod 4πℤ) exists. This confusion, leading to the flatness problem, results from ignoring the complex critical points in the stationary phase analysis. In the following discussion, we show that the large-λ spinfoam amplitude does receive dominant contributions from complex critical points away from the real integration domain. These complex critical points precisely correspond to the curved Regge geometries emergent from the spinfoam amplitude. Interestingly, the application of complex critical points leads to a derivation of the effective dynamics of Regge geometry from the spinfoam amplitude, and the emergent curved Regge geometries are constrained by this effective dynamics. We first provide the general formalism below; then we apply it to concrete models with several different 𝒦 in the following sections.
Motivated by the relation to the dynamics of Regge geometry, we separate the integral in the amplitude (2.16) into two parts. Suppose 𝒦 has M internal segments; the dynamics of Regge geometry should relate to the dynamics of these internal segment lengths. Motivated by this, we separate M internal areas j_{h_o} (h_o = 1, ..., M) from the other j_{h̄} (h̄ = 1, ..., F − M), where the j_{h_o} relate to the segment lengths. Here, F is the total number of internal triangles in 𝒦, and M equals the number of the separated internal segments. The spinfoam amplitude (2.16) then becomes

    A(\mathcal{K}) = \sum_{\{k_h\}} \int \prod_{h_o=1}^{M} \mathrm{d}j_{h_o}\; Z^{\{k_h\}}_{\mathcal{K}}(j_{h_o}),    (3.4)

where the partial amplitude Z^{{k_h}}_𝒦 is given by

    Z^{\{k_h\}}_{\mathcal{K}}(j_{h_o}) = \int \prod_{\bar h} \mathrm{d}j_{\bar h} \prod_h \big(2\lambda\, d_{\lambda j_h}\big) \int [\mathrm{d}g\,\mathrm{d}z]\; e^{\lambda S^{(k)}}.    (3.5)

We can then change variables from the areas j_{h_o} to the internal segment lengths {l_I}, I = 1, ..., M, with I labelling the internal segments. The internal triangles h_o = 1, ..., M are suitably chosen such that the change of variables is well defined in the region of interest, e.g., a neighborhood of the {˚j_{h_o}} belonging to a real critical point {˚j_h, ˚g_{ve}, ˚z_{vf}} corresponding to the flat geometry. Indeed, the chosen M areas {j_{h_o}} are related to the M segment lengths {l_I} by Heron's formula. Inverting the relation between {j_{h_o}} and {l_I} defines the local change of variables (j_{h_o}, j_{h̄}) → (l_I, j_{h̄}) in a neighborhood K of a given Regge geometry in the integration domain of (2.16). This procedure is just a change of variables, without imposing any restriction. When focusing on the integrals in the neighborhood K, we have

    \prod_{h=1}^{F} \mathrm{d}j_h = \mathcal{J}_l\, \prod_{I=1}^{M} \mathrm{d}l_I\, \prod_{\bar h=1}^{F-M} \mathrm{d}j_{\bar h},

where 𝒥_l = det(∂j_{h_o}/∂l_I) is the Jacobian obtained from the derivatives of Heron's formula. Therefore, the contribution to A(𝒦) from the neighborhood K is expressed as

    \sum_{\{k_h\}} \int \prod_{I=1}^{M} \mathrm{d}l_I\, \mathcal{J}_l\; Z^{\{k_h\}}_{\mathcal{K}}(l_I).    (3.6)

The partial amplitude Z^{{k_h}}_𝒦 has the external parameters r ≡ {l_I, j_b, ξ_{eb}}, which include not only the boundary data j_b, ξ_{eb} but also the internal segment lengths l_I. The above decomposition of the j_h-integrals relates closely to the earlier proposal [37, 38] (see also [39] in the context of area Regge calculus). The lengths l_I parametrize a submanifold ℳ_Regge in the space of the j_h: ℳ_Regge collects the j_h that can be interpreted as areas determined by the segment lengths l_I (by Heron's formula). Generically, the space of the j_h is much larger than the space of segment lengths [40], and j_{h̄} parametrizes the directions transverse to ℳ_Regge.

To study the partial amplitude Z^{{k_h}}_𝒦, we apply the theory of the stationary phase approximation for complex actions with parameters [41, 42]. In the following, we only consider the partial amplitude with k_h = 0; the situation with other k_h can be studied analogously. We consider the large-λ integral ∫_K e^{λS(r,x)} d^N x and regard r as external parameters. S(r, x) is an analytic function of r ∈ U ⊂ ℝ^k and x ∈ K ⊂ ℝ^N, and U × K is a neighborhood of (˚r, ˚x), where ˚x is a real critical point of S(˚r, x).
𝒮(r, z), with z = x + iy ∈ ℂ^N, denotes the analytic extension of S(r, x) to a complex neighborhood of ˚x. The complex critical equation is

    \partial_z \mathcal{S} = 0,    (3.7)

whose solution is z = Z(r). Here, Z(r) is an analytic function of r in the neighborhood U. When r = ˚r, Z(˚r) = ˚x reduces to the real critical point. When r deviates from ˚r, Z(r) ∈ ℂ^N can move away from the real plane ℝ^N and is therefore called the complex critical point (see Figure 1).

Figure 1. The real and complex critical points ˚x and Z(r). 𝒮(r, z) is analytically extended from the real axis to the complex neighborhood illustrated by the red disk.

With this in mind, we have the following large-λ asymptotic expansion for the integral:

    \int_K e^{\lambda S(r,x)}\, \mathrm{d}^N x = \left(\frac{1}{\lambda}\right)^{N/2} \frac{e^{\lambda \mathcal{S}(r, Z(r))}}{\sqrt{\det\!\big(-\partial^2_{z,z}\mathcal{S}(r, Z(r))/2\pi\big)}}\; \big[1 + O(1/\lambda)\big],    (3.8)

where 𝒮(r, Z(r)) and ∂²_{z,z}𝒮(r, Z(r)) are the action and the Hessian at the complex critical point. In addition, the real part of 𝒮 is zero or negative; more precisely, there exists a constant C > 0 such that

    \mathrm{Re}(\mathcal{S}) \le -C\, |\mathrm{Im}(Z)|^{2}.    (3.9)

See [41, 42] for the proof of this inequality. The inequality indicates that Re(𝒮) = 0, which gives an oscillatory phase in (3.8), can only occur at the real critical point, where Im(Z) = 0 and r = ˚r. When r deviates from ˚r by a finite distance, so that Im(Z) is finite and Re(𝒮) is negative, (3.8) is exponentially suppressed as λ is scaled large. The asymptotic formula (3.8) depends analytically on r and interpolates smoothly between two different behaviors in the parameter space of r: if the critical point is not real, then Re(𝒮) < 0, which gives an exponentially decaying amplitude; if the critical point is real, then Re(𝒮) = 0, and e^{λ𝒮} gives an oscillatory phase. These two distinct behaviors are obtained by fixing r and scaling λ. But since the asymptotic formula (3.8) depends on r analytically, we may vary r while scaling λ, and thereby reach a regime where the asymptotic behavior (3.8) is not suppressed at the complex critical point. Indeed, for any large λ there always exists r ≠ ˚r, sufficiently close to ˚r, such that Im(Z) and |Re(𝒮)| are small enough that e^{λ𝒮} in (3.8) is not suppressed at the complex critical point. The importance of (3.8) is that the integral can receive a dominant contribution from a complex critical point away from the real plane. These complex critical points give precisely the curved Regge geometries missing in the argument leading to the flatness problem.
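For orientation, formula (3.8) can be checked on the same kind of one-variable toy action used above (our own Gaussian stand-in, for which (3.8) happens to be exact): the complex critical point, the on-shell action, and the Hessian are available in closed form, and the numerical integral over the real line matches the right-hand side, including the suppression factor e^{λ Re 𝒮(Z)} = e^{−λr²/2} consistent with (3.9).

```python
import numpy as np
from scipy.integrate import quad

lam, r = 50.0, 0.3

# toy action with one real parameter r: S(r, x) = -x^2/2 + i r x.
# A real critical point exists only at r = 0; for r != 0 it moves
# off the real axis to Z(r) = i r, with S(r, Z(r)) = -r^2/2.
S = lambda x: -0.5 * x**2 + 1j * r * x
S_at_Z = -0.5 * r**2
hessian = -1.0                            # d^2 S / dz^2 at the critical point

# left-hand side of (3.8): the integral over the real line, done numerically
re, _ = quad(lambda x: np.exp(lam * S(x)).real, -np.inf, np.inf)
im, _ = quad(lambda x: np.exp(lam * S(x)).imag, -np.inf, np.inf)
exact = re + 1j * im

# right-hand side of (3.8) with N = 1, evaluated at the complex critical point
asymptotic = (1 / lam) ** 0.5 * np.exp(lam * S_at_Z) / np.sqrt(-hessian / (2 * np.pi))

print(exact, asymptotic)                  # both equal sqrt(2*pi/lam) * exp(-lam*r^2/2)
```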
The parameter r, including both the boundary data and the internal segment lengths, determines a Regge geometry that is generically curved. Hence the asymptotic formula (3.8) computes the weight with which this Regge geometry contributes to the amplitude, and it reduces A(𝒦) in the neighborhood K to

    \left(\frac{1}{\lambda}\right)^{N/2} \int \prod_{I=1}^{M} \mathrm{d}l_I\, \mathcal{N}_l\; e^{\lambda \mathcal{S}(r, Z(r))}\; \big[1 + O(1/\lambda)\big]    (3.10)

at each k_h. Here, 𝒩_l ∝ ∏_h (4 j_h) 𝒥_l [det(−∂²_{z,z}𝒮/2π)]^{−1/2} evaluated at Z(r), and r = {l_I, j_b, ξ_{eb}}. Given that {l_I} determines the Regge geometry on 𝒦, Eq. (3.10) is a path integral over Regge geometries with the effective action 𝒮, and its integration domain over l_I includes curved geometries. The integral (3.10), derived from the spinfoam amplitude, defines an effective theory of Regge geometries. Indeed, if we focus on the dominant contribution and neglect corrections of O(1/λ), the stationary phase approximation of (3.10) shows that the effective action 𝒮 gives the equation of motion

    \frac{\partial \mathcal{S}}{\partial l_I} = 0, \qquad I = 1, \cdots, M,    (3.11)

which determines the effective dynamics of Regge geometry. 𝒮 is generally complex, so (3.11) should be analytically continued to complex l_I, and the solutions are therefore generally not real.
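In practice (3.11) is solved numerically over complexified lengths, and one keeps the solutions whose imaginary parts are negligible. The sketch below does this for a toy one-variable "effective action" (our own hand-picked stand-in, not the spinfoam 𝒮) using a Newton iteration over complex l; in the actual computation 𝒮(l) is only known numerically, so the derivatives would likewise be taken by finite differences.

```python
import numpy as np

# toy complex effective action of one internal length l (a stand-in for S(l)):
# a purely imaginary "Regge-like" part plus a small complex correction.
S  = lambda l: 1j * (l**3 / 3.0 - l) + 0.05 * (l - 1.0) ** 2
dS = lambda l: 1j * (l**2 - 1.0) + 0.10 * (l - 1.0)

def ddS(l, eps=1e-6):
    # numerical second derivative of S, used for the Newton step
    return (dS(l + eps) - dS(l - eps)) / (2 * eps)

def newton(l, steps=50, tol=1e-12):
    """Newton iteration for dS(l) = 0 over complex l."""
    l = complex(l)
    for _ in range(steps):
        step = dS(l) / ddS(l)
        l -= step
        if abs(step) < tol:
            break
    return l

sol = newton(0.8)
print("solution l =", sol, " |Im l| =", abs(sol.imag))
# With these numbers the iteration lands on l = 1, an (almost) real solution
# that can be read as a Regge geometry; the other root of dS, l = -1 + 0.1i,
# has a non-negligible imaginary part and would be discarded.
```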
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' As we are going to see in Section 7, we are mainly interested in the regime where the imaginary part of the solution lI is negligible, then the solution has the interpretation of the Regge geometry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' In the following, we make the above general analysis concrete by considering the examples of spinfoam amplitudes on a single 4-simplex and the double-∆3 complex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' We also revisit briefly the existing results on ∆3 complex for the completeness.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' We compute numerically the complex critical points and S, confirming the contribution of the complex critical points to the spinfoam amplitude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' In particular, the double-∆3 model corresponding to M = 1 exhibits the non-trivial effective dynamics of the Regge geometries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The effective dynamics approximates the classical Regge calculus in the small-γ regime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 4 Four-simplex amplitude This section applies the above general procedure to the simplest situation: the 4-simplex amplitude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' In this case, there is no internal triangle: F = M = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The external parameter r only contains the boundary data r = (jb, ξeb).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The 4-simplex and its dual diagram are illustrated in Figure 2 (a) and (b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The points of the 4-simplex v are labelled by (1, 2, 3, 4, 5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The five tetrahedra on the boundary are labelled by {e1, e2, e3, e4, e5} = {(1, 2, 3, 4), (1, 2, 3, 5), (1, 2, 4, 5), (1, 3, 4, 5), (2, 3, 4, 5)}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' These tetrahedra carry group variable gve ∈ SL(2, C).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The triangle is shared by the tetrahedra and carries an SU(2) spin jf, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='g.' 
, the tetrahedron e1 = (1, 2, 3, 4) and the tetrahedron e2 = (1, 2, 3, 5) share the face f1 = (1, 2, 3).²

²The shared faces are labelled by {f1, ..., f10} = {(1, 2, 3), (1, 2, 4), (1, 2, 5), (1, 3, 4), (1, 3, 5), (1, 4, 5), (2, 3, 4), (2, 3, 5), (2, 4, 5), (3, 4, 5)}.

For convenience, in this section the notations e and f mean that e ∈ {e1, ..., e5} and f ∈ {f1, ..., f10}.

Figure 2. Panel (a) plots the 4-simplex v = (1, 2, 3, 4, 5). The boundary comprises five tetrahedra ei sharing ten faces fi. Panel (b) is the dual complex of the 4-simplex. Five boxes correspond to boundary tetrahedra carrying gve ∈ SL(2, C). The strands correspond to triangles carrying spins jf. The circles at the endpoints of the strands carry boundary states ξef. The arrows represent the orientations of the strands.

4.1 The amplitude and parametrization of variables

According to (2.1), the EPRL 4-simplex amplitude with the boundary state has the following expression [7–9, 43–45]:

A_v(j_f, \xi_{ef}) = \int \prod_e \mathrm{d}g_{ve}\, \delta_{i\sigma_3}(g_{ve_1}) \int_{(\mathbb{CP}^1)^{10}} e^{S} \prod_f \frac{d_{j_f}}{\pi}\, \mathrm{d}\Omega_{z_{vf}} .   (4.1)

Here, all triangles are on the boundary, jf = jb. To fix the SL(2, C) gauge, gve1 is fixed to be the constant matrix diag(i, −i) (the timelike normal of the reference tetrahedron e1 is past-pointing). The integrand in (4.1) is written as an exponential e^S with the action

S = \sum_f \left[\, 2 j_f \ln \frac{\langle \xi_{ef}, Z_{vef}\rangle\, \langle Z_{ve'f}, \xi_{e'f}\rangle}{\|Z_{vef}\|\, \|Z_{ve'f}\|} + i \gamma j_f \ln \frac{\langle Z_{ve'f}, Z_{ve'f}\rangle}{\langle Z_{vef}, Z_{vef}\rangle} \right].   (4.2)

The orientations of the dual faces follow from Figure 2(c). To study the large-j behavior of the amplitude, we scale all boundary spins jf → λjf by the parameter λ ≫ 1. The scaling of spins results in the scaling of the action S → λS, so the integral (4.1) can be studied by the stationary phase approximation. In the following, we first compute the real critical point {˚gve, ˚zvf}, which is the solution of the critical equation (3.1), and then describe the algorithm to compute the complex critical point in its neighborhood.

To obtain the real critical point, we adopt the 4-simplex geometry used in [23, 24, 46] to generate the boundary state. The coordinates of the five vertices Pa in Figure 2(a) in Minkowski spacetime are set to

P_1 = (0, 0, 0, 0), \quad P_2 = \left(0, 0, 0, -\tfrac{2\sqrt{5}}{3^{1/4}}\right), \quad P_3 = \left(0, 0, -3^{1/4}\sqrt{5}, -\tfrac{\sqrt{5}}{3^{1/4}}\right),
P_4 = \left(0, -\tfrac{2\sqrt{10}}{3^{3/4}}, -\tfrac{\sqrt{5}}{3^{3/4}}, -\tfrac{\sqrt{5}}{3^{1/4}}\right), \quad P_5 = \left(-3^{-1/4}10^{-1/2}, -\tfrac{\sqrt{5/2}}{3^{3/4}}, -\tfrac{\sqrt{5}}{3^{3/4}}, -\tfrac{\sqrt{5}}{3^{1/4}}\right).   (4.3)

Then, the 4-d normals of the tetrahedra are

N_{e_1} = (-1, 0, 0, 0), \quad N_{e_2} = \left(\tfrac{5}{\sqrt{22}}, \sqrt{\tfrac{3}{22}}, 0, 0\right), \quad N_{e_3} = \left(\tfrac{5}{\sqrt{22}}, -\tfrac{1}{\sqrt{66}}, \tfrac{2}{\sqrt{33}}, 0\right),
N_{e_4} = \left(\tfrac{5}{\sqrt{22}}, -\tfrac{1}{\sqrt{66}}, -\tfrac{1}{\sqrt{33}}, \tfrac{1}{\sqrt{11}}\right), \quad N_{e_5} = \left(\tfrac{5}{\sqrt{22}}, -\tfrac{1}{\sqrt{66}}, -\tfrac{1}{\sqrt{33}}, -\tfrac{1}{\sqrt{11}}\right).   (4.4)

The spinor ξef relates to the 3d normal nef by nef = ⟨ξef, ⃗σ ξef⟩ (⃗σ are the Pauli matrices). The Regge boundary data of the ten areas ˚jf, the 3d normals ˚nef, and the corresponding spinors ˚ξef of the 4-simplex are listed in Appendix A. With the Lorentzian Regge boundary data ˚r = (˚jf, ˚ξef), we solve for the real critical point (˚gve, ˚zvf), which satisfies Re(S) = ∂_{gve}S = ∂_{zvf}S = 0. The results in the literature [8, 9] show that there are exactly 2 real critical points, which have the interpretation as the geometrical 4-simplex with opposite 4-orientations. The 4-simplex geometrical interpretation of the critical points results in the same geometry as the one given by (4.3). We compute the real critical point following the strategy described in [12, 14, 46], where the boundary data and critical points for a single 4-simplex are studied in detail. The data of the real critical point (˚gve, ˚zvf) are given in Appendix A.

By fixing the rescaling gauge of zvf, each zvf can be parameterized by two real variables xvf, yvf:

z_{vf} = (1, x_{vf} + i y_{vf})^T .   (4.5)

The group elements gve_i, i = 2, 3, 4, 5, are parameterized as

g_{ve_i} = \begin{pmatrix} 1 + \frac{x^1_{ve} + i y^1_{ve}}{\sqrt{2}} & \frac{x^2_{ve} + i y^2_{ve}}{\sqrt{2}} \\ \frac{x^3_{ve} + i y^3_{ve}}{\sqrt{2}} & \frac{1 + \frac{1}{2}(x^2_{ve} + i y^2_{ve})(x^3_{ve} + i y^3_{ve})}{1 + \frac{x^1_{ve} + i y^1_{ve}}{\sqrt{2}}} \end{pmatrix}, \qquad x^1_{ve}, y^1_{ve}, x^2_{ve}, y^2_{ve}, x^3_{ve}, y^3_{ve} \in \mathbb{R} .   (4.6)
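The bottom-right entry of (4.6) is fixed so that the matrix has unit determinant. A minimal numerical sketch of this parameterization and the determinant check (Python with numpy; the function name is ours, not the paper's):

```python
import numpy as np

def g_correction(x1, y1, x2, y2, x3, y3):
    """Build the SL(2,C) matrix of (4.6) from six real variables."""
    a = 1 + (x1 + 1j * y1) / np.sqrt(2)   # top-left
    b = (x2 + 1j * y2) / np.sqrt(2)       # top-right
    c = (x3 + 1j * y3) / np.sqrt(2)       # bottom-left
    d = (1 + b * c) / a                   # bottom-right, chosen so that det = 1
    return np.array([[a, b], [c, d]])

g = g_correction(0.3, -0.1, 0.7, 0.2, -0.4, 0.5)
print(np.linalg.det(g))   # 1 (up to round-off) for generic parameters
```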
Therefore, the 4-simplex action is a function of the real variables x = (x_vf, y_vf, x^1_ve, y^1_ve, x^2_ve, y^2_ve, x^3_ve, y^3_ve) for all f ∈ {f1, ..., f10} and e ∈ {e2, ..., e5}. The real critical point ˚zvf is of the form ˚zvf = (1, ˚αvf)^T, where ˚αvf = ˚xvf + i˚yvf ∈ C. It is convenient to set one of the critical points at the origin ˚x = {0, 0, ..., 0} by modifying (4.5) and (4.6) to

z_{vf} = (1, \mathring{\alpha}_{vf} + x_{vf} + i y_{vf})^T, \qquad g_{ve} = \mathring{g}_{ve} \begin{pmatrix} 1 + \frac{x^1_{ve} + i y^1_{ve}}{\sqrt{2}} & \frac{x^2_{ve} + i y^2_{ve}}{\sqrt{2}} \\ \frac{x^3_{ve} + i y^3_{ve}}{\sqrt{2}} & \frac{1 + \frac{1}{2}(x^2_{ve} + i y^2_{ve})(x^3_{ve} + i y^3_{ve})}{1 + \frac{x^1_{ve} + i y^1_{ve}}{\sqrt{2}}} \end{pmatrix} .   (4.7)

With the parameterization in (4.7), the measures dgve and dΩzvf are

\mathrm{d}g_{ve} = \frac{1}{128\pi^4}\, \frac{\mathrm{d}x^1_{ve}\,\mathrm{d}x^2_{ve}\,\mathrm{d}x^3_{ve}\,\mathrm{d}y^1_{ve}\,\mathrm{d}y^2_{ve}\,\mathrm{d}y^3_{ve}}{\left|1 + \frac{x^1_{ve} + i y^1_{ve}}{\sqrt{2}}\right|^2}, \qquad \mathrm{d}\Omega_{z_{vf}} = \frac{\mathrm{d}x_{vf}\,\mathrm{d}y_{vf}}{\langle Z_{vef}, Z_{vef}\rangle \langle Z_{ve'f}, Z_{ve'f}\rangle} .   (4.8)

As a result, the 4-simplex amplitude takes the form

A_v = \int \mathrm{d}^{44}x\, \mu(x)\, e^{\lambda S(r, x)},   (4.9)

where r = (jf, ξef) are the boundary data. The integral is 44 real-dimensional.
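The count of 44 comes from 10 faces times 2 real variables plus 4 group elements times 6 real variables. A small sketch (Python/numpy; alpha0 and g0 stand for the critical-point data of Appendix A, which we do not reproduce here) of how a vector x ∈ R⁴⁴ is unpacked into the perturbed variables of (4.7):

```python
import numpy as np

def variables_from_x(x, alpha0, g0):
    """Unpack x in R^44 into the perturbed (z_vf, g_ve) of (4.7).
    alpha0: 10 complex numbers (critical-point data for z_vf);
    g0: list of 4 SL(2,C) matrices (critical-point data for g_ve, e = e2..e5)."""
    assert len(x) == 10 * 2 + 4 * 6               # = 44 real variables
    zs, gs = [], []
    for f in range(10):                           # two real variables per face
        zs.append(np.array([1.0, alpha0[f] + x[2 * f] + 1j * x[2 * f + 1]]))
    for e in range(4):                            # six real variables per tetrahedron
        x1, y1, x2, y2, x3, y3 = x[20 + 6 * e: 26 + 6 * e]
        a = 1 + (x1 + 1j * y1) / np.sqrt(2)
        b = (x2 + 1j * y2) / np.sqrt(2)
        c = (x3 + 1j * y3) / np.sqrt(2)
        gs.append(g0[e] @ np.array([[a, b], [c, (1 + b * c) / a]]))
    return zs, gs
```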
In the following, we focus on a neighborhood K of ˚x. We have defined the local coordinates x ∈ R⁴⁴ covering K.

4.2 Deviating from the shape-matching

The amplitude Av has real critical points for the non-degenerate Regge boundary data ˚r. However, the real critical point disappears when the boundary data deviates away from ˚r. Consider a neighborhood U of ˚r in the space of boundary data, such that any r ∈ U different from ˚r does not correspond to any Regge geometry or vector geometry³. If we fix r ∈ U and scale the spins with a large λ, there are two possible behaviors of the amplitude [8, 44]. For r = ˚r, the amplitude has two critical points whose geometrical interpretations have opposite orientations; S evaluated at the critical points gives the Regge action of the 4-simplex with opposite signs. Therefore, the asymptotic amplitude of the 4-simplex gives two oscillatory phases,

A_v \simeq \lambda^{-12} \left( \mathcal{N}_+ e^{i\lambda S_{\rm Regge}} + \mathcal{N}_- e^{-i\lambda S_{\rm Regge}} \right).   (4.10)

For r ≠ ˚r, there are no solutions to (3.1), and the amplitude is exponentially suppressed. To interpolate smoothly between the oscillatory phases and the exponential suppression in the asymptotics (4.10), the discussion in Section 3 suggests letting r vary and introducing the complex critical points.

³In the Lorentzian EPRL spinfoam amplitude, the critical points corresponding to the non-degenerate Regge geometry are isolated critical points.

The boundary data ˚r = {˚jf, ˚ξef} of the Lorentzian Regge geometry satisfies the shape-matching condition, i.e., the five geometrical tetrahedra determined by ˚r on the boundary are glued with their triangles matching in shape. Consider the 4-simplex action S(r, x) in the neighborhood K × U of (˚r, ˚x). We define z ∈ C⁴⁴ as the complexification of x, and S(r, z) extends S(r, x) holomorphically to a complex neighborhood of ˚x. To avoid confusion, we note that the integration variables x are complexified, while the boundary data r = (jf, ξef) remain real. Next, we let r = ˚r + δr vary, such that the shape-matching condition is violated. We describe below a parametrization of the tetrahedron shapes.

A tetrahedron in R³ is determined by 4 points {P̃a, P̃b, P̃c, P̃d} up to an R³ ⋊ O(3) symmetry. We gauge fix the R³ ⋊ O(3) symmetry by choosing P̃a at the origin, P̃b along the z axis, and P̃c within the (y, z)-plane. The last point P̃d is not constrained. Given the tetrahedron's segment lengths, the coordinates of the points in R³ are fixed by the above gauge fixing. For example, for the tetrahedron e2 = {1, 2, 3, 5}, ˚r implies that the coordinates of the points in R³ are

\tilde{P}_1 = (0, 0, 0), \quad \tilde{P}_2 = (0, 0, -3.40), \quad \tilde{P}_3 = (0, -2.94, -1.70), \quad \tilde{P}_5 = (-0.651, -0.981, -1.70).   (4.11)
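The gauge-fixed coordinates can be obtained directly from the six segment lengths by intersecting spheres. A sketch of this construction (Python/numpy; the function and the sign conventions are ours, chosen to mimic the negative axes in (4.11); degenerate length configurations are not handled):

```python
import numpy as np

def place_tetrahedron(l_ab, l_ac, l_ad, l_bc, l_bd, l_cd):
    """Embed a tetrahedron in R^3 from its six segment lengths with the
    gauge fixing above: P_a at the origin, P_b on the z-axis, P_c in the
    (y, z)-plane, P_d unconstrained."""
    Pa = np.zeros(3)
    Pb = np.array([0.0, 0.0, -l_ab])
    zc = -(l_ac**2 + l_ab**2 - l_bc**2) / (2 * l_ab)   # from |P_c - P_b| = l_bc
    yc = -np.sqrt(l_ac**2 - zc**2)                     # from |P_c| = l_ac
    Pc = np.array([0.0, yc, zc])
    zd = -(l_ad**2 + l_ab**2 - l_bd**2) / (2 * l_ab)
    yd = (l_ad**2 + yc**2 + zc**2 - 2 * zc * zd - l_cd**2) / (2 * yc)
    xd = -np.sqrt(max(l_ad**2 - yd**2 - zd**2, 0.0))
    Pd = np.array([xd, yd, zd])
    return Pa, Pb, Pc, Pd
```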
All other four tetrahedra can be described similarly, and the coordinates of their points in R³ are determined by ˚r. The 3d face normals ⃗n implied by the coordinates match the data in Table 3 up to a simultaneous SO(3) rotation. The spinor ξ associated with each face is given by

\xi = \frac{1}{\sqrt{2}} \left( \sqrt{1 + w},\ \frac{x + i y}{\sqrt{1 + w}} \right)^T, \qquad \text{if } \vec{n} = (x, y, w)^T.   (4.12)

When we deform the boundary data, we keep the areas jf = ˚jf unchanged, while the ξef are deformed, such that the boundary data r violate the shape-matching condition. We move the vertices P̃a ∈ R³ to deform the tetrahedron shapes. For example, the vertices in (4.11) are moved to the new positions

\tilde{P}_1 = (0, 0, 0), \quad \tilde{P}_2 = (0, 0, -3.40 + \delta w^{(2)}_2), \quad \tilde{P}_3 = (0, -2.94 + \delta y^{(2)}_3, -1.70 + \delta w^{(2)}_3),
\tilde{P}_5 = (-0.651 + \delta x^{(2)}_5, -0.981 + \delta y^{(2)}_5, -1.70 + \delta w^{(2)}_5).   (4.13)
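A sketch (Python/numpy; the helper names are ours) of how the deformed coordinates (4.13) feed into (4.12): compute the outward unit normal of each face and map it to a spinor. Note that (4.12) is singular at w = −1, which we do not handle here:

```python
import numpy as np

def outward_normals(verts):
    """Outward unit normals of the four faces of a tetrahedron given its
    four vertices in R^3 (face k is the one opposite to vertex k)."""
    normals = []
    for k in range(4):
        face = [verts[i] for i in range(4) if i != k]
        n = np.cross(face[1] - face[0], face[2] - face[0])
        n /= np.linalg.norm(n)
        if np.dot(n, verts[k] - face[0]) > 0:      # flip so n points away from vertex k
            n = -n
        normals.append(n)
    return normals

def spinor_from_normal(n):
    """The spinor xi of (4.12) for a unit normal n = (x, y, w)."""
    x, y, w = n
    return np.array([np.sqrt(1 + w), (x + 1j * y) / np.sqrt(1 + w)]) / np.sqrt(2)
```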
In the notation δx^(a)_i, δy^(a)_i, δw^(a)_i, the superscript a = 1, ..., 5 labels the tetrahedron, and the subscript i = 1, ..., 5 labels the vertex P̃_i to which the variables are associated. There are 30 variables δx^(a)_i, δy^(a)_i, δw^(a)_i in total. We keep the face areas unchanged. Then, in each tetrahedron, Heron's formula gives 4 constraint equations, one for each face area. For example, in the tetrahedron e2 = {1, 2, 3, 5}, the equations are

A_{123}(\delta w^{(2)}_2, \delta y^{(2)}_3, \delta w^{(2)}_3) = 5, \quad A_{125}(\delta w^{(2)}_2, \delta x^{(2)}_5, \delta y^{(2)}_5, \delta w^{(2)}_5) = 2,
A_{135}(\delta y^{(2)}_3, \delta w^{(2)}_3, \delta x^{(2)}_5, \delta y^{(2)}_5, \delta w^{(2)}_5) = 2, \quad A_{235}(\delta w^{(2)}_2, \delta y^{(2)}_3, \delta w^{(2)}_3, \delta x^{(2)}_5, \delta y^{(2)}_5, \delta w^{(2)}_5) = 2.   (4.14)

At least in a neighborhood of zero deformation, δw^(2)_2, δy^(2)_3, δw^(2)_3, δx^(2)_5 can be solved from (4.14) in terms of δy^(2)_5, δw^(2)_5. The shape of the tetrahedron is thus parameterized by the 2 variables δy^(2)_5, δw^(2)_5. This parametrization is convenient in our computation, although it differs from the known strategies such as the Kapovich-Millson phase space [47] or the use of dihedral angles [48]. For each tetrahedron, we adopt the same strategy. We have in total ten variables B ≡ (δy^(1)_4, δw^(1)_4, δy^(2)_5, δw^(2)_5, δy^(3)_5, δw^(3)_5, δy^(4)_5, δw^(4)_5, δy^(5)_5, δw^(5)_5) to parameterize the deformations of the five tetrahedra. The spinors ξef of each face can be expressed in terms of B according to (4.12).
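A numerical sketch of solving (4.14) (Python with numpy and scipy; the target areas 5, 2, 2, 2 are the values quoted in (4.14), and the function names are ours): for a given pair (δy^(2)_5, δw^(2)_5), the four dependent deformations are found with a root solver.

```python
import numpy as np
from scipy.optimize import fsolve

def tri_area(p, q, r):
    return 0.5 * np.linalg.norm(np.cross(q - p, r - p))

def deformed_vertices(dw2, dy3, dw3, dx5, dy5, dw5):
    """Deformed vertices of e2 = {1, 2, 3, 5}, eq. (4.13)."""
    return (np.array([0.0, 0.0, 0.0]),
            np.array([0.0, 0.0, -3.40 + dw2]),
            np.array([0.0, -2.94 + dy3, -1.70 + dw3]),
            np.array([-0.651 + dx5, -0.981 + dy5, -1.70 + dw5]))

def constraints(u, dy5, dw5):
    """The four area constraints (4.14) with targets (5, 2, 2, 2)."""
    P1, P2, P3, P5 = deformed_vertices(*u, dy5, dw5)
    return [tri_area(P1, P2, P3) - 5.0,
            tri_area(P1, P2, P5) - 2.0,
            tri_area(P1, P3, P5) - 2.0,
            tri_area(P2, P3, P5) - 2.0]

# dependent deformations at a sample point (dy5, dw5) = (0, 0.1)
sol = fsolve(constraints, x0=np.zeros(4), args=(0.0, 0.1))
```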
At this point, the boundary data are r(B) = (jf, ξef(B)). We insert r(B) into the action S(r(B), x) in (4.2), whose analytic extension is S(r(B), z). The complex critical equations are then F(B, z) = ∂_z S(r(B), z) = 0, from which we solve for the complex critical point z(B). The asymptotics of the 4-simplex amplitude with boundary data violating the shape-matching condition are given by (3.8). Here, the complex critical point z(B) inserted into the analytically continued action gives S(r(B), z(B)). In contrast to the Regge action obtained from the spinfoam asymptotics in [8], S(r(B), z(B)) is an action of the twisted geometry. Indeed, S(r(B), z(B)) depends on the degrees of freedom of the semiclassical tetrahedra, which are not constrained by the shape-matching condition. These degrees of freedom are beyond the Regge geometry and belong to the twisted geometry of the boundary.

To solve for the complex critical point, we can linearize (4.14) and obtain the linear solution for (δw^(2)_2, δy^(2)_3, δw^(2)_3, δx^(2)_5) in terms of δy^(2)_5, δw^(2)_5. We can also linearize the complex critical equation at B = (0, ..., 0) and then solve for the complex critical point z = z^(lin)(B). The solution z^(lin)(B) is a linear function of the perturbations B, whose coefficients can be computed numerically. Inserting this linear solution into the action, we obtain S(r(B), z^(lin)(B)) as a function of B and expand it to second order:

S(r(B), z^{(\mathrm{lin})}(B)) = Q_{ij} B_i B_j + L_j B_j + S_0,   (4.15)

where the coefficients Q_ij, L_j can be computed numerically, and S_0 is the spinfoam action evaluated at the real critical point with B = (0, ..., 0). In Figure 3, we let B = (0, 0, 0, δw^(2)_5, 0, 0, 0, 0, 0, 0); the red curves in panels (a) and (b) are the real and imaginary parts of S(r(B), z^(lin)(B)) with δw^(2)_5 varying from −1 to 1.

The linear solution may have a large error when the components of B are large. We therefore apply the Newton-Raphson method to search numerically for the solution, which is more accurate than the linear solution. To compare with the linear solution in Figure 3, we still focus only on the deformation of e2 = {1, 2, 3, 5} and set δy^(2)_5 = 0. We outline the procedure in the following. For any given δw^(2)_5, we numerically solve equations (4.14) for (δw^(2)_2, δy^(2)_3, δw^(2)_3, δx^(2)_5). There are multiple solutions; we select the one within a neighborhood of (0, 0, 0, 0) by requiring |δw_2² + δy_3² + δw_3² + δx_5²| ≤ 4|δw_5²|. The coordinates in (4.13) given by this solution yield the 3d face normal vectors ⃗n and spinors ξ, which are the boundary data r violating the shape-matching condition. We then apply the Newton-Raphson method to search for the complex critical point satisfying ∂_z S = 0.
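A generic sketch of the Newton-Raphson iteration for ∂_z S = 0 (Python/numpy; grad_S and hess_S stand for user-supplied routines returning the holomorphic gradient and Hessian of the analytically continued action, which we do not construct here; the paper's own procedure is the one outlined in Appendix B):

```python
import numpy as np

def newton_complex_critical(grad_S, hess_S, z0, tol=1e-12, max_iter=50):
    """Solve grad_S(z) = 0 by Newton-Raphson, starting e.g. from the real
    critical point or from the linear solution z^(lin)(B)."""
    z = np.array(z0, dtype=complex)
    for _ in range(max_iter):
        g = grad_S(z)
        if np.linalg.norm(g) < tol:
            break
        z = z - np.linalg.solve(hess_S(z), g)   # Newton update
    return z
```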
An outline of the Newton-Raphson procedure is given in Appendix B. In Figure 3, the blue curves in panels (a) and (b) are the real and imaginary parts of the analytically continued action at the complex critical points. This numerical result (blue curves) and the result from the linear solution (red curves) are close when the deformation is small; the linear solution becomes less accurate when the deformation is large.

Figure 3. In both panels, the blue curves are the numerical results from the Newton-Raphson method, and the red curves are the results from the linear solution. Panel (a) shows the real part of the analytically continued action S at the complex critical points as a function of δw^(2)_5; panel (b) shows the imaginary part. The range of δw^(2)_5 is [−1, 1].

Figure 3 demonstrates the smooth interpolation between the oscillatory and exponentially suppressed behaviors mentioned at the beginning of this subsection. In addition to scaling λ large, we need to consider the smooth deformation B: for any given λ, there exist sufficiently small deformations B beyond the shape-matching such that Re(S) is small, and thus the amplitude is not suppressed.

5 Revisit the ∆3 amplitude

In this section, we briefly revisit the existing result on the spinfoam amplitude on the ∆3 complex, for completeness and in preparation for the discussion of the double-∆3 complex in the next section. The ∆3 complex contains a single internal face (F = 1) but no internal segment (M = 0). There is an internal spin jh that is an integrated variable in the amplitude A(∆3) in (2.16). The ∆3 complex and its dual cable diagram are shown in Figure 4. All tetrahedra and triangles are spacelike. The Regge geometry on ∆3 is completely fixed by the Regge boundary data {jb, ξeb}, which is determined by the boundary segment lengths. In this section, we focus only on the Regge boundary data, in contrast to the discussion of the 4-simplex amplitude in the previous section; the generalization to non-Regge boundary data should be straightforward. In terms of the notation of Section 3, we have r = {jb, ξeb} as the boundary data, ˚r = {˚jb, ˚ξeb} fixes the flat geometry g(˚r) with deficit angle δh = 0, and ˚x = {˚jh, ˚gve, ˚zvf} is the real critical point associated to ˚r. The data ˚r, g(˚r), and ˚x are computed numerically in [12].

Figure 4. Panel (a) illustrates the simplicial complex ∆3 made of three 4-simplices {v1, v2, v3} and twelve tetrahedra ei sharing nineteen faces fi. There are eighteen boundary faces and one internal face. Panel (b) is the dual cable diagram of the ∆3 spinfoam amplitude: the boxes correspond to tetrahedra carrying gve ∈ SL(2, C), the strands stand for triangles carrying spins jf, strands of the same color belonging to different dual vertices correspond to the triangle shared by the corresponding 4-simplices, the circles at the endpoints of the strands carry boundary states |jb, ξeb⟩, and the arrows represent orientations. This figure is adapted from [49].

According to the general spinfoam amplitude (2.16) and the spinfoam action (2.17), the ∆3 amplitude A(∆3) can be written as

A(\Delta_3) = \sum_{k_h \in \mathbb{Z}} 2\lambda \int \mathrm{d}j_h\, d_{\lambda j_h} \int [\mathrm{d}g\, \mathrm{d}z]\, e^{\lambda S^{(k)}}, \qquad S^{(k)} = S + 4\pi i \sum_h j_h k_h .   (5.1)

For each kh in (5.1), the real critical point {˚jh, ˚gve, ˚zvf} occurs only when the boundary data satisfy the accidental flatness constraint (3.3). Given the boundary data ˚r corresponding to δh = 0, we consider its neighborhood U in the space of non-degenerate Regge boundary data, such that any boundary data r ∈ U satisfy |γδh| < 4π. For large λ, the sectors with kh ≠ 0 do not give the dominant contribution to A(∆3) as long as r ∈ U. If we arbitrarily fix the boundary data r ∈ U and scale λ large, the amplitude has two asymptotic behaviors, analogous to the discussion at the beginning of Section 4.2. For boundary data corresponding to a flat Regge geometry, there is a real critical point, and the amplitude gives an oscillatory phase. For boundary data corresponding to a curved Regge geometry, there are no real critical points, and the amplitude is exponentially suppressed.

However, this way of presenting the asymptotic behavior leads to confusion about the flatness problem. From the discussion in Section 3, it is clear that there is a smooth interpolation between the oscillatory-phase and exponential-suppression behaviors, since the boundary data vary smoothly. The interpolation is obtained by applying the method of the complex critical point; the formal discussion of the complex critical point and the asymptotic behavior of this model is given in [12]. Figure 5(a) plots e^{λRe(S)} in the asymptotic formula (3.8) versus δh determined by the boundary data and demonstrates the smooth interpolation between the above two asymptotic behaviors. Letting the boundary data vary at the same time as scaling λ, we find boundary data for curved geometries with small nonzero δh for any λ, such that the amplitude A(∆3) is not suppressed, as shown in Figure 5(b). The range of δh for non-suppressed A(∆3) is nonvanishing as long as λ is finite, and it is enlarged when γ is small, as shown in Figure 5(c). The δh that leads to a non-suppressed e^{λRe[S(Z(r))]} satisfies the bound

|\gamma \delta_h| \lesssim \lambda^{-1/2} .   (5.2)

This result provides evidence for the emergence of curved geometries from the spinfoam amplitude. The bound (5.2) is consistent with the earlier proposal [11] and with the result in the effective spinfoam model [13, 28, 50].
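As a rough numerical illustration of (5.2) (a back-of-the-envelope sketch in Python; the order-one prefactor is not determined by the bound), the non-suppressed window of deficit angles shrinks like 1/(γ√λ):

```python
import numpy as np

# |gamma * delta_h| <~ lambda^(-1/2)  =>  |delta_h| <~ 1 / (gamma * sqrt(lambda))
for lam in (1e8, 5e10, 1e11):
    for gamma in (0.1, 1.0):
        print(f"lambda={lam:.0e}, gamma={gamma}: |delta_h| <~ {1/(gamma*np.sqrt(lam)):.1e}")
```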
So far, the bound (5.2) has only been confirmed in the regime of small or finite γ; as we are going to see in Section 7, in the large-γ regime there are geometries violating the bound (5.2) that still give a non-suppressed contribution to the spinfoam amplitude.

Figure 5. Panel (a) plots e^{λRe(S)} versus the deficit angle δh at λ = 10¹¹ and γ = 0.1 in A(∆3). Panels (b) and (c) are the contour plots of e^{λRe(S)} as functions of (λ, δh) at γ = 0.1 and of (γ, δh) at λ = 5 × 10¹⁰ in A(∆3). They demonstrate the (non-blue) regime of curved geometries where the spinfoam amplitude is not suppressed. These figures first appeared in [12].

6 Double-∆3 amplitude and effective action

6.1 Some setups

The ∆3 complex does not have any internal segment, and the boundary data determine the Regge geometry completely. Since M = 0, A(∆3) does not give rise to the lI-integral as in (3.10), so the effective dynamics of the Regge geometry is trivial. In this section, we study the spinfoam amplitude on the "double-∆3" complex (see Figure 6(a)), denoted by ∆3². The double-∆3 complex contains a single internal segment, so M = 1, and thus A(∆3²) gives (3.10) as a 1-dimensional integral. The double-∆3 complex therefore admits non-trivial effective dynamics of the Regge geometry. Note that the same complex is also considered in the context of the effective spinfoam model [50].

The double-∆3 complex glues a pair of ∆3 complexes around the internal segment (1, 2). The complex has seven points P1, ..., P7. The 4-simplices are given by {v1, ..., v6} = {(1, 2, 3, 4, 6), (1, 2, 3, 5, 6), (1, 2, 4, 5, 6), (1, 2, 3, 4, 7), (1, 2, 3, 5, 7), (1, 2, 4, 5, 7)}. The tetrahedra are labelled by {e1, ..., e21}⁴; twelve of them are boundary tetrahedra and nine are internal. jh = {j123, j124, j125, j126, j127} are carried by the 5 internal triangles, whose dual faces are bounded by the red loops shown in the dual diagram in Figure 6(b). Since there is only one internal segment (1, 2) and all other segments are on the boundary, the boundary data and the length l12 of the internal segment determine the Regge geometry g(r) on ∆3².

⁴The tetrahedra are {e1, ..., e21} = {{1,2,3,4}, {1,2,3,6}, {1,2,4,6}, {1,3,4,6}, {2,3,4,6}, {1,2,3,5}, {1,2,5,6}, {1,3,5,6}, {2,3,5,6}, {1,2,4,5}, {1,4,5,6}, {2,4,5,6}, {1,2,3,7}, {1,2,4,7}, {1,3,4,7}, {2,3,4,7}, {1,2,5,7}, {1,3,5,7}, {2,3,5,7}, {1,4,5,7}, {2,4,5,7}}.

Figure 6. A complex made of six 4-simplices sharing the bulk edge (1, 2) with length l12 (the red line in panel (a)). In panel (a), the boundary edges are colored black, blue, violet, and cyan, while the bulk edge is colored red. Panel (b) is the dual complex of the triangulation; the internal faces carrying j123, j124, j125, j126, j127 are bounded by red loops, and the other faces are boundary faces.

Following the procedure described in (3.6) and (3.5), we pick the internal spin j123 and express the spinfoam amplitude as

A(\Delta_3^2) = \int \mathrm{d}j_{123}\, Z(j_{123}; j_b, \xi_{eb}),
Z(j_{123}; j_b, \xi_{eb}) = \sum_{\{k_h\}} \int \prod_{\bar{h}=1}^{4} \mathrm{d}j_{\bar{h}} \prod_{h=1}^{5} 2\lambda\, \tau_{[-\epsilon,\, \lambda j^{\max}+\epsilon]}(\lambda j_h) \int \mathrm{d}\mu(g, z)\, e^{\lambda S^{(k)}},   (6.1)

where j_h̄ = {j124, j125, j126, j127}. The external data of Z are rl = {j123(l12); jb, ξeb}, including both the boundary data and j123(l12). Identifying γjf with the area of f (in Planck units), Heron's formula

\gamma j_{123}(l_{12}) = \frac{1}{4}\sqrt{4 l_{12}^2 l_{13}^2 - \left(l_{12}^2 + l_{13}^2 - l_{23}^2\right)^2}   (6.2)

relates j123 to the internal segment length l12 and the boundary segment lengths l13, l23. We consider the Regge boundary data that determine all the boundary segment lengths.
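A small sketch of the correspondence (6.2) (Python/numpy; l13 and l23 are boundary lengths, here taken from Table 1 below purely for illustration):

```python
import numpy as np

def gamma_j123(l12, l13=3.40, l23=2.14):
    """Area of the triangle (1, 2, 3) from its segment lengths, eq. (6.2);
    identifying the area with gamma * j_123 makes j_123 a function of l_12."""
    return 0.25 * np.sqrt(4 * l12**2 * l13**2 - (l12**2 + l13**2 - l23**2)**2)

print(gamma_j123(1.45))   # area at the flat-geometry value l_12 = 1.45
```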
We can always make a local change of the real variable j123 → l12 within a neighborhood K of a given Regge geometry, where the correspondence j123 ↔ l12 is one-to-one. In the following discussion, we focus only on the case kh = 0, and the Regge geometries under consideration have small deficit angles. The following describes the procedure to compute the complex critical points Z(rl) of Z.

We embed the double-∆3 complex in (R⁴, ηIJ) and determine a flat Regge geometry with all tetrahedra spacelike. We assign the following coordinates to the points:

P_1 = (0, 0, 0, 0), \quad P_2 = (-0.0680, -0.220, -0.532, -1.33), \quad P_3 = (0, 0, 0, -3.40), \quad P_4 = (-0.240, -0.694, -0.981, -1.70),
P_5 = (0, 0, -2.94, -1.70), \quad P_6 = (0, -2.77, -0.981, -1.70), \quad P_7 = (-2.47, -3.89, -1.36, -1.91).

From the coordinates, we compute the lengths of the segments of the triangulation by

l_{ij} = \sqrt{\eta_{IJ} (P_i - P_j)^I (P_i - P_j)^J},   (6.3)

with ηIJ = diag(−1, 1, 1, 1) the Minkowski metric.
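A sketch of (6.3) (Python/numpy; the coordinates are the ones listed above) that reproduces the entries of Table 1:

```python
import numpy as np

eta = np.diag([-1.0, 1.0, 1.0, 1.0])               # Minkowski metric of (6.3)
P = {1: np.array([0.0, 0.0, 0.0, 0.0]),
     2: np.array([-0.0680, -0.220, -0.532, -1.33]),
     3: np.array([0.0, 0.0, 0.0, -3.40]),
     4: np.array([-0.240, -0.694, -0.981, -1.70]),
     5: np.array([0.0, 0.0, -2.94, -1.70]),
     6: np.array([0.0, -2.77, -0.981, -1.70]),
     7: np.array([-2.47, -3.89, -1.36, -1.91])}

def length(i, j):
    d = P[i] - P[j]
    return np.sqrt(d @ eta @ d)                     # real for spacelike segments

print(length(1, 2), length(2, 4))   # ~1.45 and ~0.73, matching Table 1 to the displayed precision
```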
The segment lengths are shown in Table 1.

Table 1. Each cell gives the segment length l_ij between the vertices P_i and P_j (there is no segment between P6 and P7, since no tetrahedron in footnote 4 contains both).

  l_ij |   1      2      3      4      5      6      7
   1   |        1.45   3.40   2.07   3.40   3.40   3.81
   2   |  1.45         2.14   0.729  2.45   2.62   2.96
   3   |  3.40   2.14         2.07   3.40   3.40   3.62
   4   |  2.07   0.729  2.07         2.07   2.07   2.34
   5   |  3.40   2.45   3.40   2.07         3.40   3.41
   6   |  3.40   2.62   3.40   2.07   3.40
   7   |  3.81   2.96   3.62   2.34   3.41

The triangles within a 4-simplex are classified into two categories [8]: the triangle corresponds to the thin wedge if the inner product between the timelike normals of the two adjacent tetrahedra is positive; otherwise the triangle corresponds to the thick wedge. The dihedral angles θ_{v,e_i,e_j} are given by

\begin{equation}
\text{thin wedge: } N_{ve_i} \cdot N_{ve_j} = \cosh\theta_{v,e_i,e_j}, \qquad
\text{thick wedge: } N_{ve_i} \cdot N_{ve_j} = -\cosh\theta_{v,e_i,e_j}, \tag{6.4}
\end{equation}

where the inner product is the Minkowski inner product defined by η.
Then we check the deficit angles δ_{h_i} associated to the shared triangles h_i:

\begin{equation}
\begin{aligned}
0 = \delta_{h_1} &= \theta_{v_1,e_1,e_2} + \theta_{v_2,e_2,e_6} + \theta_{v_4,e_1,e_{13}} + \theta_{v_5,e_6,e_{13}} \approx 0.514 + 0.464 - 0.575 - 0.404, \\
0 = \delta_{h_2} &= \theta_{v_1,e_1,e_3} + \theta_{v_3,e_3,e_{10}} + \theta_{v_4,e_1,e_{15}} + \theta_{v_6,e_{10},e_{15}} \approx 1.08 - 1.02 - 1.30 + 1.24, \\
0 = \delta_{h_3} &= \theta_{v_2,e_6,e_7} + \theta_{v_3,e_7,e_{10}} + \theta_{v_5,e_6,e_{17}} + \theta_{v_6,e_{10},e_{17}} \approx -0.360 - 0.481 + 0.414 + 0.426, \\
0 = \delta_{h_4} &= \theta_{v_1,e_2,e_3} + \theta_{v_2,e_2,e_7} + \theta_{v_3,e_7,e_{10}} \approx -0.723 - 0.208 + 0.931, \\
0 = \delta_{h_5} &= \theta_{v_4,e_1,e_{15}} + \theta_{v_5,e_{13},e_{17}} + \theta_{v_6,e_{15},e_{17}} \approx -0.903 + 1.20 - 0.301,
\end{aligned} \tag{6.5}
\end{equation}

which implies that the Regge geometry is flat.
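For concreteness, here is a sketch of how the timelike unit normals entering (6.4) can be obtained from the embedding coordinates, together with the boost parameter arccosh|N_a · N_b| for one wedge. The choice of 4-simplex {1,2,3,4,6} is read off from the tetrahedron list in footnote 4 (its five tetrahedra all appear there); the outward-orientation and thin/thick sign conventions needed to reproduce the signed sums in (6.5) are not fixed here, so only the magnitude of the boost is computed.

```python
import numpy as np

eta = np.diag([-1.0, 1.0, 1.0, 1.0])
P = {1: np.array([0.0, 0.0, 0.0, 0.0]),
     2: np.array([-0.0680, -0.220, -0.532, -1.33]),
     3: np.array([0.0, 0.0, 0.0, -3.40]),
     4: np.array([-0.240, -0.694, -0.981, -1.70]),
     5: np.array([0.0, 0.0, -2.94, -1.70]),
     6: np.array([0.0, -2.77, -0.981, -1.70]),
     7: np.array([-2.47, -3.89, -1.36, -1.91])}

def unit_normal(tet):
    """Timelike unit normal (eta-norm -1) of the spacelike tetrahedron spanned by `tet`."""
    pts = [P[i] for i in tet]
    edges = np.array([p - pts[0] for p in pts[1:]])   # three edge vectors of the tetrahedron
    _, _, vh = np.linalg.svd(edges @ eta)             # null space of the eta-orthogonality conditions
    n = vh[-1]
    nn = n @ eta @ n
    assert nn < 0, "normal of a spacelike tetrahedron should be timelike"
    return n / np.sqrt(-nn)

def wedge_boost(tet_a, tet_b):
    """|theta| of (6.4) for the wedge between two tetrahedra of a common 4-simplex."""
    prod = unit_normal(tet_a) @ eta @ unit_normal(tet_b)
    return np.arccosh(abs(prod))

# One wedge of the 4-simplex {1,2,3,4,6}, hinged at the triangle {1,2,3}:
print(wedge_boost((1, 2, 3, 4), (1, 2, 3, 6)))
```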
The data of the flat geometry determines the external data ˚rl for the partial amplitude Z, which has the real critical point (˚j_h̄, ˚g_ve, ˚z_vf) corresponding to this flat Regge geometry and endowing consistent 4-orientations to all 4-simplices. The boundary data of the flat geometry and the real critical point can be found in Appendix C.1, and the Mathematica code can be found in [51] and [52]. In this case, given the boundary data, the flat Regge geometry is the solution of the classical Regge equation of motion, and it is also the solution (˚j_h̄, ˚g_ve, ˚z_vf) of the critical equations from the spinfoam amplitude.

We are going to compare the classical Regge dynamics and the spinfoam effective dynamics for curved geometries. This comparison is based on numerical computations. Concretely, we deform the boundary segment length l35 → l35 + 10⁻³ but keep the other boundary segment lengths unchanged. The deformed boundary data does not admit any flat geometry on ∆²₃ (see Figure 7(b))⁵. With this deformation, a classical Regge solution (i.e. a solution of the classical Regge equation δS_Regge = 0) gives the deficit angles

\begin{equation}
\delta_{h_1} = 0.0118,\quad \delta_{h_2} = 0.0661,\quad \delta_{h_3} = -0.0215,\quad \delta_{h_4} = -0.0236,\quad \delta_{h_5} = -0.0252, \tag{6.6}
\end{equation}
which implies that the classical Regge dynamics gives a curved geometry.

We fix the boundary data and vary the internal segment length l12 = L0 + δL, where L0 = 1.45 is the length of l12 in the flat geometry. The change of l12 is denoted by δL, with δL ∈ [−0.0129, 0.00251]⁶. The classical Regge action S_Regge as a function of δL is plotted in Figure 7(a). The solution leading to (6.6) is close to the origin δL = 0 and is denoted by δL^Regge_c. There exists another Regge solution with δL < 0, far from δL = 0, as shown in Figure 7(a); we denote this solution by δL̃^Regge_c. The solution δL̃^Regge_c is likely a discretization artifact: when we smoothly deform the boundary data l35 back to the one for the flat geometry, δL^Regge_c reduces back to the flat solution, whereas δL̃^Regge_c still reduces to a curved Regge geometry. There also exist boundary data for which the second solution δL̃^Regge_c disappears. Nevertheless, we will take into account both solutions δL^Regge_c and δL̃^Regge_c in discussing the effective dynamics in Section 7.

The boundary data (jb, ξeb) and the corresponding pseudo-critical point (j⁰_h, g⁰_ve, z⁰_vf) for the curved geometry with the boundary segment length l35 → l35 + 10⁻³ and the internal edge l12 = L0 + δL^Regge_c are listed in Appendix C.2.
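To make the statement "solution of δS_Regge = 0" concrete, here is a schematic Python sketch of how such a one-internal-length Regge solution can be located numerically. The function regge_action below is a toy stand-in whose stationary points are placed at the two Regge solutions quoted above, purely so that the search runs as-is; in the actual computation S_Regge(L0 + δL) is built from the areas and deficit angles of the reconstructed geometry.

```python
import numpy as np
from scipy.optimize import brentq

# Toy stand-in for S_Regge(L0 + dL): a cubic with stationary points at the two Regge
# solutions quoted in the text (0.000439 and -0.00834). Not the actual Regge action.
r1, r2 = 0.000439, -0.00834
def regge_action(dL):
    return dL**3 / 3 - (r1 + r2) * dL**2 / 2 + r1 * r2 * dL

def dS(dL, h=1e-6):
    """Central finite difference of the (numerically tabulated) Regge action."""
    return (regge_action(dL + h) - regge_action(dL - h)) / (2 * h)

def stationary_points(dL_min=-0.0129, dL_max=0.00251, n_scan=400):
    """Scan dS for sign changes on the allowed interval and polish each bracket with brentq."""
    grid = np.linspace(dL_min, dL_max, n_scan)
    vals = np.array([dS(x) for x in grid])
    roots = []
    for a, b, fa, fb in zip(grid[:-1], grid[1:], vals[:-1], vals[1:]):
        if fa * fb < 0:
            roots.append(brentq(dS, a, b))
    return roots

print(stationary_points())   # approximately [-0.00834, 0.000439] for the toy action
```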
Notice that the geometrical areas in the boundary data relate to jb by ab = γ jb, and the area ab relates to the lengths lij by Heron's formula. The following discussion involves fixing the geometrical areas ab and performing computations at different values of the Barbero-Immirzi parameter γ, which leads to different jb at different γ. Fixing the geometrical area instead of fixing jb is useful when we compare with the Regge action S_Regge, since S_Regge only depends on the geometrical boundary data.

6.2 Numerically computing the effective action

Given the boundary condition (jb, ξeb) corresponding to the above Regge boundary data with the deformed l35, and given any l12 and j123(l12) taking values inside a neighborhood of the value for the flat geometry, we find the pseudo-critical point (j⁰_h̄, g⁰_ve, z⁰_vf) close to the real critical point inside the real integration domain. The pseudo-critical point only satisfies Re(S) = ∂_{gve}S = ∂_{zvf}S = 0 but does not necessarily satisfy ∂_{j_h̄}S = 0. The pseudo-critical point (j⁰_h̄, g⁰_ve, z⁰_vf) is the critical point of the spinfoam amplitude with fixed jh, jb [9], and it endows the Regge geometry g(r) and consistent 4-simplex orientations to the ∆²₃ complex⁷. It reduces to the real critical point (˚j_h̄, ˚g_ve, ˚z_vf) when rl = ˚rl corresponds to the flat geometry on ∆²₃.

⁵If the boundary data admitted a flat Regge geometry on the complex, the flat geometry would be a solution of the Regge equation. However, the solution of the Regge equation with the given boundary data is a curved geometry, contradicting the assumption of admitting the flat geometry.
⁶The range used here is restricted by the existence of curved Regge geometries with all tetrahedra spacelike.
⁷Since the correspondence between j123 and l12 is not 1-to-1 globally, it might be possible to have multiple pseudo-critical points corresponding to different Regge geometries with the same value of j123. However, in our numerical analysis, the other l12 from the same j123 does not satisfy the triangle inequality. Therefore all pseudo-critical points correspond to the same Regge geometry but with different 4-simplex orientations, although we only focus on a fixed orientation.
Figure 7. Panel (a) shows the Regge action as a function of δL when we deform the boundary segment length l35 → l35 + 10⁻³ from the boundary data of the flat geometry. In this case, the Regge solutions are given by δL^Regge_c ≃ 0.000439 and δL̃^Regge_c ≃ −0.00834. Panel (b) shows $\sqrt{\tfrac{1}{5}\sum_{i=1}^{5}\delta_{h_i}^2}$ versus δL with the deformed boundary data. All geometries in this range of δL are curved; the minimum of $\sqrt{\tfrac{1}{5}\sum_{i=1}^{5}\delta_{h_i}^2}$ is 0.013.

As the deformation of the segment length l35 is small, this curved geometry is close to the flat geometry, so (j⁰_h̄, g⁰_ve, z⁰_vf) is close to (˚j_h̄, ˚g_ve, ˚z_vf) in the integration domain. The data for the pseudo-critical point are listed in Appendix C.2. In this computation, we still adopt parametrizations of the variables similar to those in (4.5), (4.6), and (4.7), but with the pseudo-critical point as the origin.
The parametrizations of the group elements g_{v1e2}, g_{v2e7}, g_{v3e3}, g_{v4e13}, g_{v5e17}, g_{v6e15}, g_{v1e1}, g_{v2e6}, and g_{v3e10} are upper-triangular matrices, due to the SU(2) gauge fixing at the 9 internal tetrahedra,

\begin{equation}
g_{ve} = g^{0}_{ve}\begin{pmatrix} 1 + \frac{x^{1}_{ve}}{\sqrt{2}} & \frac{x^{2}_{ve} + i\, y^{2}_{ve}}{\sqrt{2}} \\ 0 & * \end{pmatrix}, \tag{6.7}
\end{equation}

where the entry ∗ is determined by det(g_{ve}) = 1. The internal spins j_h̄ are parametrized as

\begin{equation}
j_{\bar h} = j^{0}_{\bar h} + \mathrm{j}_{\bar h}, \qquad \mathrm{j}_{\bar h} \in \mathbb{R}, \tag{6.8}
\end{equation}

where the second term is the perturbation of the internal spin around the pseudo-critical value. As a result, for kh = 0, the spinfoam amplitude A(∆²₃) and Z(j123) in (6.1) can be written in the form

\begin{equation}
A(\Delta^2_3) = \int \mathrm{d} l_{12}\, \Big|\frac{\partial j_{123}}{\partial l_{12}}\Big|\, Z(j_{123}(l_{12});\, j_b, \xi_{eb}), \qquad
Z(j_{123}(l_{12});\, j_b, \xi_{eb}) \sim \int \mathrm{d}^{241}x\; \mu(x)\, e^{\lambda S(r_l, x)}, \qquad r_l = (j_{123}(l_{12}), j_b, \xi_{eb}), \tag{6.9}
\end{equation}

where x ≡ (x¹_ve, y¹_ve, x²_ve, y²_ve, x³_ve, y³_ve, x_vf, y_vf, j_h̄). The parametrizations of (l12, x) define a coordinate chart covering the neighborhood K enclosing x̃₀ = (j123, x₀) = (j⁰_h, g⁰_ve, z⁰_vf) and ˚x̃ = (˚j123, ˚x) = (˚j_h, ˚g_ve, ˚z_vf). This neighbourhood is large enough, since the parametrizations are valid generically. The pseudo-critical point is x₀ = (0, 0, ..., 0), which contains 241 zero components. Here we use "∼" instead of "=" because (1) we only consider kh = 0 and ignore the other kh terms⁸, and (2) we only focus on the contribution from the neighborhood K enclosing a single pseudo-critical point⁹.
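A tiny sketch of the gauge-fixed parametrization (6.7): building the upper-triangular perturbation with unit determinant. The reference element g0 below is a placeholder identity; in the actual computation it is the pseudo-critical value g⁰_ve.

```python
import numpy as np

def gauge_fixed_g(g0, x1, x2, y2):
    """Upper-triangular perturbation of (6.7); the (2,2) entry is fixed by det = 1."""
    a = 1.0 + x1 / np.sqrt(2.0)
    b = (x2 + 1j * y2) / np.sqrt(2.0)
    pert = np.array([[a, b],
                     [0.0, 1.0 / a]])   # det = a * (1/a) = 1
    return g0 @ pert

g0 = np.eye(2, dtype=complex)           # placeholder for the pseudo-critical g0_ve
g = gauge_fixed_g(g0, x1=0.01, x2=-0.02, y2=0.005)
print(np.linalg.det(g))                 # 1 up to rounding
```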
⁸The integrals over the neighborhood K with kh ≠ 0 give exponentially suppressed contributions.
⁹There may exist other pseudo-critical points outside K in Z, e.g. the ones corresponding to different orientations of 4-simplices, but our discussion only focuses on the critical points inside K.

In our discussion, we only consider the effective dynamics within a sector of Regge geometries with a fixed 4d orientation. We compute the complex critical point of Z for any given external data rl. Here, both S(rl, x) and µ(x) are analytic in the neighborhood K of x₀. S(rl, x) can be analytically continued to a holomorphic function S(rl, z), where z ∈ C²⁴¹ is in a complex neighborhood of x₀. The analytic continuation is obtained by simply extending x ∈ R²⁴¹ to z ∈ C²⁴¹. A formal discussion of the analytic continuation of the spinfoam action is given in [14].

We fix the boundary data to be the one resulting in (6.6) and vary the length l12 = L0 + δL, where L0 = 1.45 (the value of l12 in Table 1) and the change of l12 is δL ∈ [−0.0129, 0.00251]. For any given δL, combined with the boundary data, we repeat the steps above (from the beginning of this subsection) to reconstruct the Regge geometry and the corresponding pseudo-critical point. Taking the pseudo-critical point as the starting point, we apply the Newton-Raphson method, repeating the steps in (B.2)-(B.8), to numerically compute the complex critical point Z(rl) for a sequence of δL.
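The Newton-Raphson step used here can be sketched as follows. The functions grad_S and hess_S stand for the first and second derivatives of the analytically continued action with respect to the 241 complexified coordinates; they are placeholders for the quantities assembled in Appendix B, which is not reproduced here, and the toy example at the end only exercises the solver.

```python
import numpy as np

def newton_raphson(grad_S, hess_S, z0, tol=1e-12, max_iter=50):
    """Solve grad_S(z) = 0 for the complex critical point, starting from the
    pseudo-critical point z0 (a real vector embedded in C^241)."""
    z = np.asarray(z0, dtype=complex)
    for _ in range(max_iter):
        g = grad_S(z)
        if np.linalg.norm(g) < tol:
            break
        z = z - np.linalg.solve(hess_S(z), g)   # Newton update in the complexified variables
    return z

# Toy usage on a 2-dimensional gradient field, just to exercise the solver:
A = np.array([[2.0 + 0.1j, 0.3], [0.3, 1.5 - 0.2j]])
b = np.array([0.01j, -0.02])
grad = lambda z: A @ z + b + 0.05 * z**3
hess = lambda z: A + np.diag(0.15 * z**2)
print(newton_raphson(grad, hess, np.zeros(2)))
```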
By evaluating S at the complex critical point and applying the asymptotic formula (3.8), we obtain the following asymptotic behavior of Z and A(∆²₃) for the dominant contribution from the integral on K:

\begin{equation}
\begin{aligned}
Z(j_{123}(l_{12});\, j_b, \xi_{eb}) &\sim \left(\frac{1}{\lambda}\right)^{\frac{241}{2}} N_l\, e^{\lambda S(r_l, Z(r_l))}\,\big[1 + O(1/\lambda)\big],\\
A\big(\Delta^2_3\big) &\sim \left(\frac{1}{\lambda}\right)^{\frac{241}{2}} \int \mathrm{d} l_{12}\, \Big|\frac{\partial j_{123}}{\partial l_{12}}\Big|\, N_l\, e^{\lambda S(r_l, Z(r_l))}\,\big[1 + O(1/\lambda)\big],
\end{aligned} \tag{6.10}
\end{equation}

where $N_l = \mu(Z(r_l))\,\det\!\big[-\partial^2_{z,z} S(r_l, Z(r_l))/2\pi\big]^{-1/2}$. Effectively, A(∆²₃) gives a path integral of Regge geometries on ∆²₃, and S(rl, Z(rl)) is the effective action for the Regge geometry in the large-λ regime of the spinfoam amplitude. The stationary phase approximation of the l12-integral in (6.10) relates to the variation of S(rl, Z(rl)) with respect to l12. The effective equation of motion

\begin{equation}
\partial_{l_{12}} S(r_l, Z(r_l)) = 0 \tag{6.11}
\end{equation}

determines the effective dynamics of the Regge geometry.
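Evaluating the leading term of (6.10) numerically requires the determinant of a 241-dimensional complex Hessian; a log-determinant form avoids overflow. A minimal sketch follows, where the Hessian, the action value, and the measure factor are random stand-ins for −∂²_{z,z}S, S(rl, Z(rl)), and µ(Z(rl)), and the branch of the square root of the determinant is a convention.

```python
import numpy as np

def log_leading_term(S_at_Zc, hess_at_Zc, mu_at_Zc, lam, dim=241):
    """log of the leading asymptotic term in (6.10):
    (1/lambda)^(dim/2) * mu * det(-hess/(2*pi))^(-1/2) * exp(lambda * S)."""
    sign, logabsdet = np.linalg.slogdet(-hess_at_Zc / (2 * np.pi))
    logdet = np.log(sign) + logabsdet        # branch choice of the square root is conventional
    return (-0.5 * dim * np.log(lam)
            + np.log(mu_at_Zc)
            - 0.5 * logdet
            + lam * S_at_Zc)

# Stand-in data just to exercise the formula:
rng = np.random.default_rng(0)
H = rng.normal(size=(241, 241)) + 1j * rng.normal(size=(241, 241))
H = -(H + H.T) / 2 - 5 * np.eye(241)         # a symmetric complex matrix as a placeholder Hessian
print(log_leading_term(S_at_Zc=-0.01 + 0.12j, hess_at_Zc=H, mu_at_Zc=1.0, lam=1e3))
```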
6.3 Comparing to the Regge action

It is interesting to compare the effective action S(rl, Z(rl)) to the classical Regge action S_Regge, since both actions define a dynamics of Regge geometry. The definition of the Regge action S_Regge(l12) is reviewed in Appendix D. In order to compare, we compute and plot the real and imaginary parts S_R and S_I of S(rl, Z(rl)), respectively,

\begin{equation}
S(r_l, Z(r_l)) = S_R(\gamma, \delta L) + i\, S_I(\gamma, \delta L). \tag{6.12}
\end{equation}

We view both S_R and S_I as functions of the two variables γ and δL, and we compute their numerical values for samples of γ ∈ [10⁻⁹, 10⁶] and δL ∈ [−0.0129, 0.00251].

It is known that the spinfoam action contains an overall phase, which needs to be subtracted in order to compare with the Regge action. We denote the overall phase by φ(γ). It can be computed numerically by inserting the pseudo-critical point (j⁰_h̄, g⁰_ve, z⁰_vf) into the spinfoam action S and subtracting the Regge action of the corresponding geometry. Generally, we have

\begin{equation}
\varphi(\gamma) = \alpha/\gamma, \tag{6.13}
\end{equation}

where the coefficient α depends on the boundary data. In terms of the spinfoam variables, the overall phase comes from the γ-independent terms in S and is linear in the boundary spins, φ ∼ jb; but here we fix the boundary areas and let γ vary, so φ ∼ ab/γ. The numerical value for our setup of the boundary data is α = 0.003993.

Figure 8. The red curves plot the Regge action as a function of δL. In comparison, the blue curves plot S′_I of the analytically continued spinfoam action at the complex critical points, and the green curves plot the real part S_R of the analytically continued spinfoam action at the complex critical points.
In general, the overall phase in the spinfoam action can be cancelled by the phase choice of the boundary ξeb. To remove the overall phase from S_I, we define S′_I by

\begin{equation}
S_I(\gamma, \delta L) = -S'_I(\gamma, \delta L) + \varphi(\gamma). \tag{6.14}
\end{equation}

S′_I as a function of δL is compared to the classical Regge action for different values of γ in Figure 8. The minus sign in front of S′_I relates to the 4-simplex orientation at the real and pseudo-critical points. As indicated by Figure 8, S′_I well approximates the Regge action for small γ, with negligible corrections.

Figure 9. Panels (a) and (b) are log-log plots of the distance (7.5) between the spinfoam and Regge solutions in a neighbourhood of δL = 0 as a function of γ. The boundary data has the boundary segment length l35 deformed from the flat geometry by l35 → l35 + 10⁻³ for (a) and by l35 → l35 + 10⁻¹⁰ for (b).

Figure 10. Panel (a) shows the real part of the spinfoam solution δL^Spinfoam_c versus the log-scaled value of γ, with the boundary data deformed from the flat geometry by l35 → l35 + 10⁻³. Panel (b) is the log-log plot of the absolute value of the imaginary part of the spinfoam solution δL^Spinfoam_c as a function of γ.
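The subtraction in (6.13)-(6.14) amounts to removing a 1/γ phase before comparing with the Regge action. A small sketch of how α can be extracted from numerical samples and the phase removed; the sample arrays here are synthetic placeholders for the computed S_I(γ, δL) at fixed δL.

```python
import numpy as np

# Synthetic stand-in: a 1/gamma phase plus a gamma-independent "Regge-like" part.
gammas = np.logspace(-9, 6, 31)
alpha_true = 0.003993
S_I_samples = alpha_true / gammas - 0.12                # placeholder for S_I(gamma) at fixed dL

# Fit S_I ~ alpha/gamma + const by linear least squares in the variable 1/gamma.
A = np.vstack([1.0 / gammas, np.ones_like(gammas)]).T
alpha_fit, const = np.linalg.lstsq(A, S_I_samples, rcond=None)[0]

# Remove the overall phase as in (6.14): S'_I = phi(gamma) - S_I.
phi = alpha_fit / gammas
S_I_prime = phi - S_I_samples
print(alpha_fit)        # recovers 0.003993 for the synthetic data
print(S_I_prime[:3])    # the gamma-independent part, +0.12 here
```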
When γ increases, S′_I gives nontrivial corrections to the Regge action. For any given γ, the real part S_R is always negative, and |S_R| is larger for larger |δL|, so e^{λS} is smaller for larger |δL|. However, if we fix δL and vary γ, |S_R| is smaller, and hence e^{λS} is less suppressed for any λ, when γ is smaller. In other words, a smaller γ opens a larger range of δL in which |S_R| is small and e^{λS} is not suppressed for a given λ. In this range of δL, the numerical results indicate that S(rl, Z(rl)) well approximates the Regge action. A similar situation appeared for the ∆3 amplitude, where the amplitude with smaller γ admits a wider range of curved geometries (see Figure 5(c)).

7 Solutions of effective dynamics on double-∆3

7.1 Spinfoam complex critical point and the Regge solution δL^Regge_c

The above discussion compares the effective action S(rl, Z(rl)) to the classical Regge action. It is also interesting to compare the solution of the effective equation ∂_{l12}S(rl, Z(rl)) = 0 to the solution of the Regge equation. In the above computation, the real and imaginary parts of S(rl, Z(rl)) are obtained as numerical functions. Numerically solving the effective equation then involves finding the possible complex roots of numerical derivatives of the complex function S(rl, Z(rl)), which requires an estimate of S(rl, Z(rl)) on the complexified δL plane and may give a relatively large numerical error.

Figure 11. The log-log plot of the average of the absolute values of the imaginary parts of the complex critical point versus γ.
Figure 12. Panel (a) is the log-log plot of the negative real part of S̃(r′, δL, z) at the complex critical points z = Z̃(r′, δL) as a function of γ, with the boundary data deformed from the flat geometry by l35 → l35 + 10⁻³. Panel (b) shows the imaginary part of S̃(r′, δL, z) at the complex critical points z = Z̃(r′, δL) versus the log-scaled γ. We subtract the overall phase φ(γ) from Im[S̃(r′, δL^Spinfoam_c, Z̃)] and add a minus sign in plotting (b). In Panel (b), the overall phase is φ(γ) ≃ 0.003993 γ⁻¹, and the maximum and minimum of the plot range are Max_a ≃ 0.121606 and Min_a ≃ 0.121596.

In the following, we introduce an alternative strategy, which computes the solution of the effective equation more efficiently. Instead of introducing the partial amplitude Z, we consider the full spinfoam amplitude, which can be written as the following integral for the same contribution as in (6.10):

\begin{equation}
A\big(\Delta^2_3\big) \sim \int \mathrm{d}\delta L\, \mathrm{d}^{241}x\; \mu(\delta L, x)\, e^{\lambda \tilde{S}(r', \delta L, x)}. \tag{7.1}
\end{equation}

Here the external parameter r′ is just the boundary data, r′ = (jb, ξeb), and S̃(r′, δL, x) is the spinfoam action S with j123 = j123(l12) and l12 = L0 + δL.
Recall that δL^Regge_c is a solution of the classical Regge equation. The Regge geometry with δL^Regge_c corresponds to a pseudo-critical point of S̃(r′, δL, x). Both S̃(r′, δL, x) and µ(δL, x) are analytic in a neighbourhood of this pseudo-critical point. Therefore, S̃(r′, δL, x) and µ(δL, x) can be analytically continued to holomorphic functions S̃(r′, δL, z) and µ(δL, z), where (δL, z) ∈ C²⁴² is in a complex neighborhood of the pseudo-critical point. We fix the boundary data r′ to be the same as the one used in Figure 7. Since r′ is a small deformation of the boundary data of the flat geometry, the neighbourhood covers the real critical point corresponding to the flat geometry and the boundary data before the deformation.

For each γ, we numerically compute the complex critical point (δL, z) = (δL^Spinfoam_c, Z̃)(r′) as the solution of the following equations:

\begin{align}
\partial_{z} \tilde{S}(r', \delta L, z) &= 0, \tag{7.2}\\
\partial_{\delta L} \tilde{S}(r', \delta L, z) &= 0. \tag{7.3}
\end{align}

Since we fix the boundary data r′ and vary γ, the complex critical points give a continuous trajectory parametrized by γ in the complex space of (δL, z). In the numerical computation, we sample a sequence of γ ∈ [10⁻⁹, 10⁶] and compute the complex critical point for each γ by the Newton-Raphson method, following the steps in (B.2)-(B.8). For any γ, the recursion of the Newton-Raphson method can be initialized at the pseudo-critical point and gives a convergent result within the desired tolerance.
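The γ-sweep can be organized as a simple loop around a Newton-Raphson solver such as the one sketched earlier; grad_full and hess_full stand for the derivatives of S̃ with respect to the 242 complexified variables (δL, z) at a given γ, and the linear "gradient" at the end is only a toy to exercise the loop.

```python
import numpy as np

def critical_point_trajectory(gammas, grad_full, hess_full, w0, tol=1e-12, max_iter=50):
    """For each gamma, solve (7.2)-(7.3), i.e. grad_full(gamma, w) = 0 with
    w = (dL, z) in C^242, initializing at the pseudo-critical point w0."""
    trajectory = []
    for gamma in gammas:
        w = np.asarray(w0, dtype=complex)
        for _ in range(max_iter):
            g = grad_full(gamma, w)
            if np.linalg.norm(g) < tol:
                break
            w = w - np.linalg.solve(hess_full(gamma, w), g)
        trajectory.append(w)
    return np.array(trajectory)

gammas = np.logspace(-9, 6, 61)   # sampled range of the Barbero-Immirzi parameter

# Toy usage with a gamma-dependent linear "action gradient":
M = np.diag(np.arange(1.0, 243.0)).astype(complex)
grad_toy = lambda gamma, w: M @ w + np.full(242, 1e-3 / (1.0 + gamma))
hess_toy = lambda gamma, w: M
traj = critical_point_trajectory(gammas, grad_toy, hess_toy, np.zeros(242))
print(traj.shape)   # (61, 242)
```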
Moreover, all the resulting complex critical points depend smoothly on the boundary deformation δl35 and reduce to the real critical point when δl35 → 0 (see Figure 13 for an example).

Figure 13. The red points are the list plot of the norm of the complex critical point (δL^Spinfoam_c, Z̃) versus the deformation of the boundary segment length δl35. For any complex critical point (δL^Spinfoam_c, Z̃) = (δL^Spinfoam_c, z1, z2, ..., z241), the norm is defined as

\begin{equation*}
\big\|(\delta L^{\rm Spinfoam}_c, \tilde{Z})\big\| = \sqrt{\big|\delta L^{\rm Spinfoam}_c\big|^2 + |z_1|^2 + |z_2|^2 + \cdots + |z_{241}|^2}.
\end{equation*}

Here, the boundary segment length l35 is deformed from the flat geometry by l35 → l35 + δl35 at γ = 10⁻⁶, with δl35 ∈ [0, 10⁻³]. The blue point is the complex critical point at δl35 = 10⁻³, and the green point is the real critical point at the origin (0, 0), corresponding to the flat geometry. The cyan curve is the fitted function ∥(δL^Spinfoam_c, Z̃)∥ ≃ 1.97×10⁶ δl35 − 5.49×10⁷ (δl35)².
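A short sketch of the norm used in Figure 13 and of fitting its δl35-dependence by a quadratic through the origin; the arrays below are synthetic placeholders for the computed complex critical points, so the fitted coefficients are not those of the figure.

```python
import numpy as np

def critical_point_norm(dL_c, Z_c):
    """Norm of Figure 13: sqrt(|dL_c|^2 + sum_i |z_i|^2)."""
    return np.sqrt(abs(dL_c) ** 2 + np.sum(np.abs(Z_c) ** 2))

# Synthetic stand-in: norms growing roughly linearly in dl35 with a quadratic correction.
dl35 = np.linspace(0.0, 1e-3, 21)
norms = 2.0 * dl35 - 50.0 * dl35**2 + 1e-7 * np.random.default_rng(1).normal(size=dl35.size)

# Least-squares fit norm ~ a*dl35 + b*dl35^2 (no constant term: dl35 = 0 is the flat point).
A = np.vstack([dl35, dl35**2]).T
a, b = np.linalg.lstsq(A, norms, rcond=None)[0]
print(a, b)   # recovers approximately (2.0, -50.0) for the synthetic data
```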
The solution δL of (7.2) and (7.3) is the same as the solution of ∂_{δL}S(rl, Z(rl)) = 0. Indeed,

\begin{equation}
0 = \partial_{\delta L} S(r_l, Z(r_l))
= \frac{\partial S(r_l, Z(r_l))}{\partial r_l}\bigg|_{Z(r_l)} \cdot \frac{\partial r_l}{\partial \delta L}
+ \frac{\partial S(r_l, Z(r_l))}{\partial Z(r_l)}\bigg|_{r_l} \frac{\partial Z(r_l)}{\partial \delta L}
= \frac{\partial S(r_l, Z(r_l))}{\partial r_l}\bigg|_{Z(r_l)} \cdot \frac{\partial r_l}{\partial \delta L}
= \big[\partial_{\delta L} S(r_l, z)\big]_{z = Z(r_l)}, \tag{7.4}
\end{equation}

where we have used ∂S(rl, Z(rl))/∂Z(rl)|_{rl} = 0, and Z(rl) depends on δL through rl. Moreover, z = Z(rl) is the solution of (7.2) when δL is analytically continued to complex values. The result [∂_{δL}S(rl, z)]_{z=Z(rl)} = 0 from (7.4), followed by the analytic continuation of δL, is therefore equivalent to (7.3) with the solution of (7.2) inserted.

The complex critical point gives δL ≡ δL^Spinfoam_c(γ) as a trajectory parametrized by γ in a complex neighborhood of δL = 0. This solution is compared to the Regge solution δL^Regge_c ≃ 0.000439 (recall Figure 7(a)).

Figure 14. Panel (a) is the log-log plot of the distance between the spinfoam solution and the Regge solution in a neighborhood of δL̃ = δL̃^Regge_c as a function of γ. Panel (b1) shows the real part of the spinfoam solution δL̃^Spinfoam_c versus γ. Panel (b2) is the log-log plot of the imaginary part of the spinfoam solution δL̃^Spinfoam_c versus γ. Panel (c1) shows the real part of S̃(r′, δL̃, z) at the complex critical points versus γ, and the inset in (c1) is the log-log plot. Panel (c2) plots the imaginary part of S̃(r′, δL̃, z) at the complex critical points versus γ.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Panel (c2) plots the imaginary parts of ˜S(r′, δ �L, z) at the complex critical points v.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' (recall Figure 7(a)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' This solution δLSpinfoam c (γ) is complex generically, although it is close to the real axis, especially for small γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Figure 9 (a) demonstrates the distance (in the complex plane) between the spinfoam solution δLSpinfoam c (γ) and the classical Regge solution δLRegge c : ��δLSpinfoam c (γ) − δLRegge c �� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' (7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='5) This distance is small in the small-γ regime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' So the classical Regge dynamics is reproduced by the spinfoam effective dynamics for small γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' This result is consistent with comparing the actions in Figure 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' This result is also consistent with some earlier arguments in [18–21] about the semiclassical approximation of spinfoams with small γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=" – 26 – [sLSpinfoam SLRegge c Im[SLSpinfoam] Re[S(r', SLSpinfoam, 2)] Im[S(r', 8LSpinfoam, Z)) + p() 0." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='135: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='130 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content="125 [Re[S(r', 8LSpinfoam, 2)] 0." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='120 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='115 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='110 10-10 10-7 10-4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1 100Figure 15.' 
Figure 15. The figure is the log-log plot of $e^{\lambda \mathrm{Re}[\widetilde{S}(r', \delta L^{\rm Spinfoam}_c, \widetilde{Z})]}$ (blue curve) and $e^{\lambda \mathrm{Re}[\widetilde{S}(r', \delta\widetilde{L}^{\rm Spinfoam}_c, \widetilde{Z})]}$ (red curve) as a function of $\lambda$ at $\gamma = 10^{-8}$.

The distance (7.5) becomes larger when increasing $\gamma$, indicating that the spinfoam amplitude with larger $\gamma$ gives a larger correction to the classical Regge solution. Therefore the effective theory in the large-$\gamma$ regime differs more significantly from Regge gravity. Furthermore, the distance (7.5) stabilizes in the large-$\gamma$ regime, as shown in Figure 9(a). The value at which it stabilizes becomes smaller when the boundary data is closer to that of the flat geometry, as seen by comparing Figure 9(a) and (b). The small-$\gamma$ and large-$\gamma$ regimes might be viewed as two phases of the spinfoam amplitude: the effective dynamics is closer to the Regge dynamics for small $\gamma$ but differs more from it for large $\gamma$.

The critical point $(\delta L^{\rm Spinfoam}_c, \widetilde{Z})(r')$ is generally complex for every $\gamma$ (see Figure 11). Figure 12(a) and (b) plot the analytically continued action $\widetilde{S}(r', \delta L, z)$ (with the overall phase $\varphi(\gamma)$ removed) evaluated at the complex critical points for a large number of samples of $\gamma$. The real part $\mathrm{Re}(\widetilde{S})$ is close to zero in both the small-$\gamma$ and large-$\gamma$ regimes, so $e^{\lambda \widetilde{S}}$ in the asymptotic formula (3.8) is not suppressed at large $\lambda$ for both small and large $\gamma$. The non-suppressed $e^{\lambda \widetilde{S}}$ for small $\gamma$ has been anticipated, since it can be predicted by the bound (5.2). But the non-suppressed $e^{\lambda \widetilde{S}}$ at large $\lambda$ in the large-$\gamma$ regime violates the bound (5.2). This result suggests that the bound (5.2) is not universal but only valid for small or finite $\gamma$.

Figure 9(b) plots $|\delta L^{\rm Spinfoam}_c - \delta L^{\rm Regge}_c|$ for different boundary data, which deform the boundary data of the flat geometry by $l_{35} \to l_{35} + 10^{-10}$. This boundary data is closer to the boundary data of the flat geometry. The results are qualitatively similar to those from the previous boundary data, although the maximum of $|\delta L^{\rm Spinfoam}_c - \delta L^{\rm Regge}_c|$ becomes smaller compared to the previous boundary data. Changing the boundary data does not seem to shift the location in $\gamma$-space where the small-$\gamma$ phase (where (7.5) is small) transitions to the large-$\gamma$ phase (where (7.5) stabilizes), as suggested by comparing Figures 9(a) and (b).

7.2 Complex critical point and the other Regge solution $\delta\widetilde{L}^{\rm Regge}_c$

Recall from Figure 7(a) that there is another classical Regge solution $\delta L = \delta\widetilde{L}^{\rm Regge}_c$ with the boundary condition under consideration. This solution corresponds to a different pseudo-critical point, which we use as the starting point for initializing the recursion in the Newton-Raphson method. Following the same procedure discussed above, we obtain a new trajectory of complex critical points parametrized by $\gamma$.
The complex critical point gives $\delta L = \delta\widetilde{L}^{\rm Spinfoam}_c(\gamma)$, which is generically complex. Figure 14 plots the distance $|\delta\widetilde{L}^{\rm Spinfoam}_c(\gamma) - \delta\widetilde{L}^{\rm Regge}_c|$, the real and imaginary parts of $\delta\widetilde{L}^{\rm Spinfoam}_c(\gamma)$, and the real and imaginary parts of the action $\widetilde{S}$ evaluated at the complex critical points. For small $\gamma$, $\delta\widetilde{L}^{\rm Spinfoam}_c(\gamma)$ is approximately real and close to the classical Regge solution $\delta\widetilde{L}^{\rm Regge}_c$. Increasing $\gamma$ results in $\delta\widetilde{L}^{\rm Spinfoam}_c(\gamma)$ giving larger corrections to $\delta\widetilde{L}^{\rm Regge}_c$.

Both the complex critical point here, denoted by $(\delta\widetilde{L}^{\rm Spinfoam}_c, \widetilde{Z})(r')$, and $(\delta L^{\rm Spinfoam}_c, \widetilde{Z})(r')$ discussed in the last subsection give contributions to $A(\Delta_3^2)$. When we compare their contributions, $e^{\lambda S}$ is suppressed faster at the critical point here than at the one in the last subsection (see Figure 15) for fixed $\gamma < 0.1$. This relates to the fact that $\delta\widetilde{L}^{\rm Regge}_c$ gives larger deficit angles. Therefore the complex critical point here contributes to the amplitude much less than the one in the last subsection for generic small $\gamma$ and large $\lambda$. Recall that $\delta\widetilde{L}^{\rm Regge}_c$ likely relates to a discretization artifact. The result suggests that the spinfoam amplitude should suppress the contribution from the discretization artifact, in favor of a good continuum limit.

The complex critical points used in Figure 14 are likely beyond the stationary phase approximation (for complex action) described above and below (3.7), because these complex critical points do not analytically relate to the real critical point $(\mathring{j}_h, \mathring{g}_{ve}, \mathring{z}_{vf})$ for the flat geometry. This relates to the existence of complex critical points with $\mathrm{Re}(\widetilde{S}) > 0$ in Figure 14(c1), violating (3.9). Indeed, when we continuously deform the boundary data $r'$ by $l_{35} \to l_{35} + \delta l_{35}$, from the boundary data of the flat geometry to boundary data that does not admit a flat geometry, the solution of (7.2) and (7.3) deforms analytically from the real critical point to the previous complex critical point $(\delta L^{\rm Spinfoam}_c, \widetilde{Z})(r')$ (see Figure 13; a similar property holds for the complex critical points in Section 6), but not to any of the complex critical points used in Figure 14. The complex critical points used in Figure 14 have to be studied by the fully-fledged Picard-Lefschetz theory (see, e.g., [23, 53, 54]). Consequently, given that the spinfoam amplitude is defined on the real integration cycle where $\mathrm{Re}(S) \le 0$, a complex critical point with $\mathrm{Re}(\widetilde{S}) > 0$ does not contribute to the asymptotics of the amplitude, because the steepest-ascent flow associated with this critical point turns out to have no intersection with the real integration cycle. Therefore, the contributions from the complex critical points in Figure 14 are vanishing or suppressed for finite or larger $\gamma$, where $\mathrm{Re}(S) > 0$ or $e^{\lambda \mathrm{Re}(S)}$ is suppressed.

8 Conclusion and Outlook

Our above analysis demonstrates the importance of complex critical points in understanding the asymptotic behaviour of the spinfoam amplitude in the large-$j$ regime. In the case of the 4-simplex amplitude, taking into account the complex critical point generalizes the asymptotics to non-Regge boundary data and relates to the twisted geometry. In the case of the simplicial complex, the complex critical point plays an important role in deriving the effective dynamics from the spinfoam amplitude.
The effective dynamics closely relates to Regge gravity in the small-$\gamma$ regime, as demonstrated by the numerical computation of the amplitude on the double-$\Delta_3$ complex. Our work provides a general procedure to derive the effective theory in the large-$j$ regime. From the perspective of semiclassical analysis, our numerical computation should be generalized to triangulations larger than the double-$\Delta_3$, which have more internal segments. One should check whether Regge gravity can still be reproduced by the large-$j$ effective dynamics on larger triangulations.

The effective dynamics in LQG has been primarily investigated in the context of symmetry-reduced models, such as Loop Quantum Cosmology (LQC) and black holes; see, e.g., [55, 56]. The effective dynamics is useful in deriving the singularity resolution. Our result shows that the spinfoam amplitude also results in certain effective dynamics. However, this effective dynamics is in terms of the discrete Regge geometry, in contrast to the effective dynamics in terms of smooth fields in LQC and black hole models. Research in progress aims to understand whether the effective dynamics from the spinfoam amplitude can relate to LQC and black holes. If such a relation exists, it might provide a new approach toward embedding LQC and black hole models in the full theory of LQG.

It is also interesting to investigate the behavior of the effective dynamics under lattice refinement of spinfoam amplitudes. The Regge geometries approach the continuum limit under refinement, so we expect that the effective dynamics of Regge geometries from spinfoams should reduce to a certain effective dynamics of the smooth geometry.
Acknowledgments

The authors acknowledge helpful communications with Bianca Dittrich, Carlo Rovelli, and Simone Speziale. M.H. receives support from the National Science Foundation through grants PHY-1912278 and PHY-2207763, and the sponsorship provided by the Alexander von Humboldt Foundation during his visit to FAU Erlangen-Nürnberg. In addition, M.H. acknowledges IQG at FAU Erlangen-Nürnberg, IGC at Penn State University, Perimeter Institute for Theoretical Physics, and University of Western Ontario for the hospitality during his visits. Research at Perimeter Institute is supported in part by the Government of Canada through the Department of Innovation, Science and Economic Development and by the Province of Ontario through the Ministry of Colleges and Universities.

A Boundary data for the single 4-simplex

In Section 3, we introduce the real critical points of the 4-simplex, which correspond to the Regge geometry. We construct the Regge boundary geometry; Tables 2, 3 and 4 record the areas $\mathring{a}_f = \gamma \mathring{j}_f$, the 3d normals $\mathring{n}_{ef}$ and the corresponding spinors $\mathring{\xi}_{ef}$ of the single 4-simplex.

Table 2. Each cell shows the area $\mathring{a}_f$ of the face shared by the row tetrahedron $e$ and the column tetrahedron $e' \in \{e'_1, \ldots, e'_5\}$. The non-empty cells, row by row, are
e1: 5, 5
e2: 2, 2
e3: 5, 2
e4: 2, 2
e5: 5, 2

Table 3. Each cell shows the 3d normal vector $\mathring{n}_{ef}$ of the face shared by the row tetrahedron and the column tetrahedron. The cells, row by row, are
e1: (1.00, 0, 0), (−0.333, −0.943, 0), (−0.333, 0.471, −0.816), (−0.333, 0.471, 0.816)
e2: (0.938, 0, −0.346), (−0.782, −0.553, 0.289), (−0.948, 0.276, −0.160), (−0.616, 0.276, 0.738)
e3: (−0.313, −0.884, −0.346), (0.782, 0.553, 0.289), (0.0553, 0.986, −0.160), (−0.0553, 0.673, 0.738)
e4: (−0.244, 0.345, −0.907), (0.739, −0.215, 0.639), (−0.0431, −0.768, 0.639), (−0.0862, 0.122, 0.989)
e5: (−0.436, 0.617, 0.655), (0.859, −0.385, −0.338), (0.0771, −0.938, −0.338), (0.154, −0.218, −0.964)

Table 4. Each cell shows a spinor $\mathring{\xi}_{ef}$ corresponding to a 3-normal to the face. The cells, row by row, are
e1: (0.707, −0.707), (0.707, −0.236 − 0.667i), (0.953, 0.175 − 0.247i), (0.953, −0.175 + 0.247i)
e2: (0.820, −0.572), (0.803, −0.487 − 0.344i), (0.762, 0.622 − 0.181i), (0.932, −0.330 + 0.148i)
e3: (0.572, −0.273 − 0.774i), (0.596, −0.655 − 0.463i), (0.648, 0.043 + 0.761i), (0.362, 0.076 − 0.929i)
e4: (0.976, 0.125 − 0.177i), (0.905, 0.408 − 0.119i), (0.425, 0.051 + 0.904i), (0.997, −0.0432 + 0.0611i)
e5: (0.910, −0.240 + 0.339i), (0.818, −0.525 + 0.236i), (0.576, 0.067 − 0.815i), (0.991, −0.0778 + 0.110i)

Tables 5 and 6 record the values of the real critical point $\mathring{g}_{ve}$ and $\mathring{z}_{vf}$ for the 4-simplex with the boundary data $(\mathring{j}_f, \mathring{\xi}_{ef})$. All the Regge boundary data $\mathring{r} = (\mathring{j}_f, \mathring{\xi}_{ef})$ and the data of the real critical point $(\mathring{g}_{ve}, \mathring{z}_{vf})$ for the 4-simplex amplitude can be found in the Mathematica notebook [57].

Table 5. Each cell of the table is the critical point $\mathring{g}_{ve}$:
$\mathring{g}_{v e_1} = \begin{pmatrix} 0 & -i \\ -i & 0 \end{pmatrix}$,
$\mathring{g}_{v e_2} = \begin{pmatrix} 0 & -1.03i \\ -0.969i & -0.358i \end{pmatrix}$,
$\mathring{g}_{v e_3} = \begin{pmatrix} 0 & -1.03i \\ -0.969i & 0.337 + 0.119i \end{pmatrix}$,
$\mathring{g}_{v e_4} = \begin{pmatrix} 0 & -1.17i \\ -0.855i & -0.149 + 0.105i \end{pmatrix}$,
$\mathring{g}_{v e_5} = \begin{pmatrix} 0 & -0.874i \\ -1.14i & -0.199 + 0.141i \end{pmatrix}$.
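As a small consistency check (not part of the paper), the determinants of these matrices should equal 1 up to the displayed rounding, assuming, as is standard in this construction, that the $\mathring{g}_{ve}$ are $\mathrm{SL}(2,\mathbb{C})$ elements; a quick sketch in Python:

```python
import numpy as np

# Values transcribed from Table 5 (rounded to the digits shown there).
g_ve = {
    "e1": np.array([[0, -1j], [-1j, 0]]),
    "e2": np.array([[0, -1.03j], [-0.969j, -0.358j]]),
    "e3": np.array([[0, -1.03j], [-0.969j, 0.337 + 0.119j]]),
    "e4": np.array([[0, -1.17j], [-0.855j, -0.149 + 0.105j]]),
    "e5": np.array([[0, -0.874j], [-1.14j, -0.199 + 0.141j]]),
}

# SL(2,C) elements have unit determinant; deviations here only reflect
# the rounding of the published values.
for e, g in g_ve.items():
    print(e, np.round(np.linalg.det(g), 4))   # all close to 1
```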
Table 6. Each cell shows the critical points $\mathring{z}_{vf}$. The non-empty cells, row by row, are
e1: (1, −1), (1.00, 1.82 + 2.57i)
e2: (1.00, −0.915 + 0.402i), (1.00, −1.41 − 0.31i)
e3: (1.00, −0.333 + 0.943i), (1.00, 0.086 − 0.690i)
e4: (1.00, 1.86 + 0.99i), (1.00, 5.72 + 8.08i)
e5: (1.00, −1.82 − 2.57i), (1.00, 0.071 + 0.470i)

B The Newton-Raphson method

The Newton-Raphson method for the single-variable equation $f(x) = 0$ is initialized with a starting point $x_0$, and one then iterates
\[
x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}, \qquad (B.1)
\]
to approach the solution with higher accuracy.
Taking the single 4-simplex case as an example, the equations of motion are 44-dimensional; we denote them by
\[
F\begin{pmatrix} z_1 \\ \vdots \\ z_{44} \end{pmatrix} = \begin{pmatrix} f_1(z_1, \ldots, z_{44}) \\ \vdots \\ f_{44}(z_1, \ldots, z_{44}) \end{pmatrix}. \qquad (B.2)
\]
The derivative of this system is the $44 \times 44$ Jacobian given by
\[
J(z_1, \ldots, z_{44}) = \begin{pmatrix} \dfrac{\partial f_1}{\partial z_1} & \cdots & \dfrac{\partial f_1}{\partial z_{44}} \\ \vdots & \ddots & \vdots \\ \dfrac{\partial f_{44}}{\partial z_1} & \cdots & \dfrac{\partial f_{44}}{\partial z_{44}} \end{pmatrix}. \qquad (B.3)
\]
We define the function $G$ by
\[
G(z) = z - J(z)^{-1} F(z). \qquad (B.4)
\]
The functional Newton-Raphson method for nonlinear systems is the iteration procedure that evolves from the initial $z^{(0)}$, which in our case is the real critical point $\mathring{x}$, and generates
\[
z^{(k)} = G\big(z^{(k-1)}\big) = z^{(k-1)} - J\big(z^{(k-1)}\big)^{-1} F\big(z^{(k-1)}\big), \qquad k \ge 1. \qquad (B.5)
\]
We can write this as
\[
\begin{pmatrix} z^{(k)}_1 \\ \vdots \\ z^{(k)}_{44} \end{pmatrix} = \begin{pmatrix} z^{(k-1)}_1 \\ \vdots \\ z^{(k-1)}_{44} \end{pmatrix} + \begin{pmatrix} \Delta z^{(k-1)}_1 \\ \vdots \\ \Delta z^{(k-1)}_{44} \end{pmatrix}, \qquad (B.6)
\]
where
\[
\begin{pmatrix} \Delta z^{(k-1)}_1 \\ \vdots \\ \Delta z^{(k-1)}_{44} \end{pmatrix} = -J\big(z^{(k-1)}\big)^{-1} F\big(z^{(k-1)}\big). \qquad (B.7)
\]
We set the desired tolerance $\epsilon = 10^{-100}$, and we stop after $n$ iterations when
\[
\Big| \big(\Delta z^{(n-1)}_1\big)^2 + \cdots + \big(\Delta z^{(n-1)}_{44}\big)^2 \Big| < \epsilon. \qquad (B.8)
\]
The resulting $z^{(n)}$ is the approximate solution within the tolerance. We evaluate the analytically continued 4-simplex action $S$ at $z^{(n)}$ and apply it to the asymptotic formula (3.8).
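For illustration, the iteration (B.5)–(B.8) can be sketched in Python as follows. This is not the authors' implementation, the toy 2-dimensional system is a placeholder for the 44-dimensional equations of motion, and the quoted tolerance $\epsilon = 10^{-100}$ would require arbitrary-precision arithmetic (e.g. mpmath or Mathematica), so a looser double-precision tolerance is used here.

```python
import numpy as np

def newton_raphson(F, J, z0, tol=1e-12, max_iter=100):
    """Iterate z^(k) = z^(k-1) - J(z^(k-1))^{-1} F(z^(k-1)) until the
    stopping criterion |sum_i (Delta z_i)^2| < tol (cf. (B.8)) is met.

    F : callable returning the residual vector,
    J : callable returning the Jacobian matrix dF_i/dz_j,
    z0: starting point (in the paper, the real critical point).
    """
    z = np.asarray(z0, dtype=complex)
    for _ in range(max_iter):
        dz = -np.linalg.solve(J(z), F(z))   # Delta z^(k-1) = -J^{-1} F, cf. (B.7)
        z = z + dz
        if np.abs(np.sum(dz**2)) < tol:
            return z
    raise RuntimeError("Newton-Raphson did not converge")

# Toy usage on a 2d system (stand-in for the 44d equations of motion):
F = lambda z: np.array([z[0]**2 + z[1] - 1.0, z[0] - z[1]**3])
J = lambda z: np.array([[2*z[0], 1.0], [1.0, -3*z[1]**2]])
print(newton_raphson(F, J, np.array([0.5 + 0.1j, 0.5])))
```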
C Boundary data for the $\Delta_3^2$ complex

C.1 Boundary data and the real critical point for the flat $\Delta_3^2$ complex

We construct the flat geometry with the segment lengths in Table 1. The corresponding boundary data for the flat geometry is shown in Tables 7, 8, 9, 10, 11 and 12. Here, the areas $a_f$ and the spins $j_f$ satisfy $a_f = \gamma j_f$.

Table 7. Boundary data $(\mathring{a}_b, \mathring{\xi}_{eb})$ for the 4-simplex $v_1 = \{1, 2, 3, 4, 6\}$, with rows $e$ against columns $e' \in \{e'_1, \ldots, e'_5\}$. The spinors $\mathring{\xi}_{eb}$, row by row, are
e1: (−0.41 + 0.73i, −0.15 − 0.52i)
e2: (−0.61 + 0.22i, −0.76i)
e3: (−0.078 − 0.033i, 0.04 − 1.0i)
e4: (0.60, −0.66 − 0.46i), (0.76, −0.04 − 0.65i)
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='88i) (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='95, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='03 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='31i) e ˚ab e’ e′ 1 e′ 2 e′ 3 e′ 4 e′ 5 e1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='75 e2 5 e3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='55 e4 2 2 e5 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='8 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='0 Table 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Boundary data (˚ab,˚ξeb) for the 4-simplex v2 = {1, 2, 3, 5, 6} e ˚ξeb e′ e′ 2 e′ 6 e′ 7 e′ 8 e′ 9 e2 (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='72 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='13 i, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='02 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='68 i) e6 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='81 i, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='59i) e7 (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='27 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='19i, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='94i) e8 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='71, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='24 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='67 i) (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='95, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='17 + 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='25 i) e9 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='74, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='67 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='05i) (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='0, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='048 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='068i) e ˚ab e′ e′ 2 e′ 6 e′ 7 e′ 8 e′ 9 e2 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='8 e6 5 e7 5 e8 5 5 e9 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='6 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2 Table 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Boundary data (˚ab,˚ξeb) for the 4-simplex v3 = {1, 2, 4, 5, 6} e ˚ξeb e′ e′ 3 e′ 7 e′ 10 e′ 11 e′ 12 e3 (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='22 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='03 i, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='07 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='97 i) e7 (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='10 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='073i, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='99i) e10 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='18 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='98 i, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='065 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='11 i) e11 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='98, 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='12 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='18i) (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='43, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='87 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='25i) e12 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='99, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='01 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='17i) (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='0, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='018 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='025 i) e ˚ab e′ e′ 3 e′ 7 e′ 10 e′ 11 e′ 12 e3 2 e7 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2 e10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='69 e11 5 2 e12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='55 2 Table 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Boundary data (˚ab,˚ξeb) for the 4-simplex v6 = {1, 2, 4, 5, 7} e ˚ξeb e′ e′ 10 e′ 14 e′ 17 e′ 20 e′ 21 e10 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='20 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='91 i, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='07 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='35 i) e14 (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='55 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='68 i, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='16 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='46 i) e17 e20 (0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='76, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='22 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='61 i) (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='74, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='57 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='36 i) (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='85, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='52 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1 i) e21 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='95, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='31 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='07 i) (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='39, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='89 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='23 i) e ˚ab e′ e′ 10 e′ 14 e′ 17 e′ 20 e′ 21 e10 2 e14 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='5 e17 e20 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='4 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='4 e21 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='69 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='5 – 31 – Table 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Boundary data (˚ab,˚ξeb) for the 4-simplex v4 = {1, 2, 3, 4, 7} e ˚ξeb e′ e′ 1 e′ 13 e′ 14 e′ 15 e′ 16 e1 (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='33 + 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='75 i, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='11 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='56 i) e13 (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='52 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='71 i, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='35 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='32 i) e14 (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='59 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='71 i, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='18 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='35 i) e15 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='90, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='14 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='41 i) (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='63, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='33 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='71 i) e16 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='94, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='25 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='22 i) (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='94, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='28 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='18i) e ˚ab e′ e′ 1 e′ 13 e′ 14 e′ 15 e′ 16 e1 2 e13 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2 e14 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1 e15 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='6 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='3 e16 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='75 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='5 Table 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Boundary data (˚ab,˚ξeb) for the 4-simplex v5 = {1, 2, 3, 5, 7} e ˚ξeb e′ e′ 6 e′ 13 e′ 17 e′ 18 e′ 19 e6 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='04 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='77 i, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='01 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='63 i) e13 (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='48 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='71 i, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='31 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='41 i) e17 (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='19 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='17 i, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='18 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='95 i) (-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='05 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='25 i, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='06 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='97 i) e18 (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='90, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='43) e19 (0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='71, -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='26 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='65 i) (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='95, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='19 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='25 i) e ˚ab e′ e′ 6 e′ 13 e′ 17 e′ 18 e′ 19 e6 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='6 e13 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='6 e17 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='4 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='5 e18 5 e19 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2 Once the flat geometry is constructed, the real critical points � ˚jh,˚gve,˚zvf � can be obtained by solving the critical equations Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1) and (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The solution of the critical point equations relates to the Lorentzian Regge geometry, as described in [8, 9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' ˚gve relates to the Lorentzian transformation acting on each tetrahedron and glueing them together to form the ∆2 3 complex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' In this model, we fix gve to be constant SL(2, C) matrices for v1e5, v2e9, v3e12, v4e16, v5e19, v6e21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' The group elements gve for the bulk tetrahedra v1e1, v1e2, v2e6, v2e7, v3e3, v3e10, v4e13, v5e17, v6e14 are fixed to be the upper triangular matrix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' For the ∆2 3 triangulation, there are five internal faces h(12k) with k = 3, 4, 5, 6, 7.' 
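For reference (a standard parametrization, stated here for the reader rather than taken from the paper), an upper-triangular SL(2, C) element has the form

    g_ve = ( λ  μ ; 0  λ⁻¹ ),   λ ∈ C∖{0},  μ ∈ C,

so that det g_ve = 1 holds automatically; for the bulk tetrahedra this gauge choice leaves two complex parameters per group element.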
For the ∆₃² triangulation there are five internal faces h(12k) with k = 3, 4, 5, 6, 7. The areas of these internal faces are shown in Table 13. The numerical results of the real critical point (˚g_ve, ˚z_vf) corresponding to the flat geometry are listed in Tables 14–19.

Table 13. Areas of internal faces h in the ∆₃² complex:
a_h(123) = 0.971,  a_h(124) = 0.333,  a_h(125) = 1.55,  a_h(126) = 1.78,  a_h(127) = 1.93.

Table 14. The real critical point (˚g_ve, ˚z_vf) for the 4-simplex v1 = (1, 2, 3, 4, 6).
Table 15. The real critical point (˚g_ve, ˚z_vf) for the 4-simplex v2 = (1, 2, 3, 5, 6).
Table 16. The real critical point (˚g_ve, ˚z_vf) for the 4-simplex v3 = (1, 2, 4, 5, 6).
Table 17. The real critical point (˚g_ve, ˚z_vf) for the 4-simplex v4 = (1, 2, 3, 4, 7).
Table 18. The real critical point (˚g_ve, ˚z_vf) for the 4-simplex v5 = (1, 2, 3, 5, 7).
Table 19. The real critical point (˚g_ve, ˚z_vf) for the 4-simplex v6 = (1, 2, 4, 5, 7).
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='81i) e17 (1,-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='86 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='07i) (1,-1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='9+2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2i) e20 (1,-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='94 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='77i) (1,-2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='7 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='4i) e21 (1,-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='45 - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='08i) (1,-3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2+0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='6i) All the boundary data ˚r = (˚jb,˚ξeb) and the data of the real critical point (˚jh,˚gve,˚zvf) can be found in the Mathematica notebook in [57].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='2 Boundary data and the pseudo critical points for the curved ∆2 3 complex The boundary data in Appendix C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='1 admits a flat geometry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' To construct a curved geometry, we deform the segment length l35 → l35 +10−3 and keep the other boundary segment lengths unchanged.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' We list the boundary data for this curved geometry in Table 20, 21, 22, 23, 24 and 25 as the internal segment length is l12 = L0 + δLRegge c .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Table 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v1 = {1, 2, 3, 4, 6} e ξeb e′ e′ 1 e′ 2 e′ 3 e′ 4 e′ 5 e1 (-0.' 
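The deformation itself is a one-line change on the boundary data. As a minimal sketch in Python (the segment labels, the helper name deform_boundary and the numerical values are placeholders for illustration only, not the paper's data, which is generated in the Mathematica notebook [57]):

# Perturb a single boundary segment length, keeping all other boundary lengths fixed.
def deform_boundary(lengths, segment=(3, 5), delta=1e-3):
    """Return a copy of the boundary segment lengths with one segment shifted."""
    curved = dict(lengths)
    curved[segment] += delta  # l35 -> l35 + 10^-3
    return curved

# Flat-geometry boundary lengths (placeholder values keyed by vertex pairs).
flat_lengths = {(3, 5): 2.0, (3, 4): 2.0, (4, 5): 2.0}
curved_lengths = deform_boundary(flat_lengths)
print(curved_lengths[(3, 5)])  # 2.001

The internal segment length l12 then takes the value L0 + δL_c^Regge quoted above.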
Table 20. Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v1 = {1, 2, 3, 4, 6}: the spinors ξeb (upper block) and the quantities ab (lower block), with rows e1, e2, e3, e4, e5 and columns e′1, e′2, e′3, e′4, e′5.

Table 21. Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v2 = {1, 2, 3, 5, 6}: rows e2, e6, e7, e8, e9 and columns e′2, e′6, e′7, e′8, e′9.

Table 24. Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v5 = {1, 2, 3, 5, 7}: rows e6, e13, e17, e18, e19 and columns e′6, e′13, e′17, e′18, e′19.
Table 22. Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v3 = {1, 2, 4, 5, 6}: rows e3, e7, e10, e11, e12 and columns e′3, e′7, e′10, e′11, e′12.

Table 23. Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v4 = {1, 2, 3, 4, 7}: rows e1, e13, e14, e15, e16 and columns e′1, e′13, e′14, e′15, e′16.

Table 25. Boundary data (ab, ξeb) of the curved geometry for the 4-simplex v6 = {1, 2, 4, 5, 7}: rows e10, e14, e17, e20, e21 and columns e′10, e′14, e′17, e′20, e′21.
The curved geometry does not have a real critical point. However, we can find the pseudo-critical point (j0h, g0ve, z0vf), which lies inside the real integration domain and is close to the real critical point. The pseudo-critical point satisfies the critical equation (3.1) but violates the critical equation (3.2). The data for the pseudo-critical point is listed in Tables 26, 27, 28, 29, 30 and 31; a schematic check of the (3.1)-versus-(3.2) distinction is sketched below.
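To make the distinction concrete, here is a schematic check in Python: the callables eq31_residuals and eq32_residuals are stand-ins for the two groups of critical equations (3.1) and (3.2), which are not reproduced in this appendix, and the toy residuals at the end are for illustration only.

import numpy as np

def is_pseudo_critical(point, eq31_residuals, eq32_residuals, tol=1e-8):
    """Flag a point that solves the first group of critical equations (to
    tolerance tol) while leaving the second group nonzero."""
    r31 = np.max(np.abs(eq31_residuals(point)))
    r32 = np.max(np.abs(eq32_residuals(point)))
    return r31 < tol and r32 >= tol

# Toy residuals, purely for illustration.
eq31 = lambda x: np.array([x[0] - 1.0])  # vanishes when x[0] == 1
eq32 = lambda x: np.array([x[1]])        # vanishes only when x[1] == 0
print(is_pseudo_critical(np.array([1.0, 0.3]), eq31, eq32))  # True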
Table 26. The pseudo-critical point (g0ve, z0vf) for the 4-simplex v1 = (1, 2, 3, 4, 6): the 2×2 complex matrices g0v1e for e = e1, e2, e3, e4, e5, and the spinors |z0v1f⟩ with rows e1, e2, e3, e4, e5 and columns e′1, e′2, e′3, e′4, e′5.

Table 27. The pseudo-critical point (g0ve, z0vf) for the 4-simplex v2 = (1, 2, 3, 5, 6): g0v2e for e = e2, e6, e7, e8, e9, and |z0v2f⟩ with rows e2, e6, e7, e8, e9 and columns e′2, e′6, e′7, e′8, e′9.

Table 30. The pseudo-critical point (g0ve, z0vf) for the 4-simplex v5 = (1, 2, 3, 5, 7): g0v5e for e = e6, e13, e17, e18, e19, and |z0v5f⟩ with rows e6, e13, e17, e18, e19 and columns e′6, e′13, e′17, e′18, e′19.
The boundary data for the curved geometry and the corresponding pseudo-critical point can be found in the Mathematica notebook [57].

Table 28. The pseudo-critical point (g0ve, z0vf) for the 4-simplex v3 = (1, 2, 4, 5, 6): g0v3e for e = e3, e7, e10, e11, e12, and |z0v3f⟩ with rows e3, e7, e10, e11, e12 and columns e′3, e′7, e′10, e′11, e′12.

Table 29. The pseudo-critical point (g0ve, z0vf) for the 4-simplex v4 = (1, 2, 3, 4, 7): g0v4e for e = e1, e13, e14, e15, e16, and |z0v4f⟩ with rows e1, e13, e14, e15, e16 and columns e′1, e′13, e′14, e′15, e′16.

Table 31. The pseudo-critical point (g0ve, z0vf) for the 4-simplex v6 = (1, 2, 4, 5, 7): g0v6e for e = e10, e14, e17, e20, e21, and |z0v6f⟩ with rows e10, e14, e17, e20, e21 and columns e′10, e′14, e′17, e′20, e′21.
D Regge Action

Let us first recall the volume of a simplex. The volume formula for the Lorentzian 4-simplex σ is given by [58, 59]

    V_\sigma = \frac{(-1)^{4}}{2^{4}(4!)^{2}} \det(C_\sigma),    (D.1)

where V_\sigma is the squared volume and \det(C_\sigma) is the Cayley–Menger determinant. The Cayley–Menger matrix C_\sigma is the 6 × 6 matrix whose upper-left 5 × 5 block has entries l^2_{ij} for i, j = 0, · · · , 4, where l_{ij} is the segment length; it is augmented by an additional row and column with entries (C_\sigma)_{5,5} = 0 and (C_\sigma)_{i,5} = (C_\sigma)_{5,j} = 1. That is,

    C_\sigma = \begin{pmatrix} l^2_{ij} & 1_i \\ 1_j & 0 \end{pmatrix}.    (D.2)

Similarly, the volume formula for the Euclidean tetrahedron is given by

    V_\tau = \frac{(-1)^{3+1}}{2^{3}(3!)^{2}} \det(C_\tau),    (D.3)

where C_\tau is the Cayley–Menger matrix of the tetrahedron, a 5 × 5 matrix defined in the same way as above.

Given \vec{a} and \vec{b} as the timelike normal vectors of two tetrahedra τ_a, τ_b of the 4-simplex σ, the Lorentzian dihedral angles are [60, 61]

    \theta_t(\sigma) = \mathrm{sgn}(\vec{a}\cdot\vec{b}) \cosh^{-1}\!\left[ \mathrm{sgn}(\vec{a}\cdot\vec{b}) \, \frac{\vec{a}\cdot\vec{b}}{|\vec{a}|\,|\vec{b}|} \right], \qquad \mathrm{sgn}(\vec{a}\cdot\vec{b}) = \frac{\sqrt{(\vec{a}\cdot\vec{b})^2}}{\vec{a}\cdot\vec{b}}.    (D.4)

In the 4-dimensional triangulation, the hinge of the angle is a triangle, denoted by t. A triangle t is shared by τ_a and τ_b, and s_{\bar t} is the squared length of the segment \bar t opposite to the triangle t in σ. For example, in the 4-simplex σ = (12345) the tetrahedra τ_a = (1234) and τ_b = (1235) share the triangle t = (123); then \bar t is the segment (45).
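As a concrete numerical illustration of Eqs. (D.1)–(D.3), the short Python sketch below builds the augmented Cayley–Menger matrix from the squared segment lengths and evaluates the squared volume of a simplex. It is not code from the paper: the function names, the NumPy dependency and the unit-tetrahedron consistency check are illustrative assumptions.

import math
import numpy as np

def cayley_menger(sq):
    """Augmented Cayley-Menger matrix of Eq. (D.2): the block of squared
    segment lengths l_ij^2 bordered by a row and a column of 1's, with a 0
    in the corner.  `sq` maps vertex pairs (i, j) to l_ij^2, vertices 0..n-1."""
    n = 1 + max(max(pair) for pair in sq)      # number of vertices
    C = np.ones((n + 1, n + 1))
    np.fill_diagonal(C, 0.0)                   # l_ii^2 = 0 and the corner entry 0
    for (i, j), s in sq.items():
        C[i, j] = C[j, i] = s
    return C

def simplex_volume_squared(sq, dim):
    """Squared volume of a dim-simplex from its squared edge lengths,
    V^2 = (-1)^(dim+1) det(C) / (2^dim (dim!)^2), the Euclidean convention of
    Eq. (D.3); the Lorentzian 4-simplex of Eq. (D.1) carries (-1)^4 instead."""
    C = cayley_menger(sq)
    return ((-1) ** (dim + 1) * np.linalg.det(C)
            / (2 ** dim * math.factorial(dim) ** 2))

# Consistency check: a regular tetrahedron with unit edges has V^2 = 1/72.
unit_tet = {(i, j): 1.0 for i in range(4) for j in range(i + 1, 4)}
assert abs(simplex_volume_squared(unit_tet, 3) - 1.0 / 72.0) < 1e-9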
The dihedral angles with respect to t are given by [62]

    \theta_t(\sigma) = \frac{\sqrt{\left(\frac{1}{V_t}\frac{\partial V_\sigma}{\partial s_{\bar t}}\right)^2}}{\frac{1}{V_t}\frac{\partial V_\sigma}{\partial s_{\bar t}}} \, \cosh^{-1}\!\left[ \frac{\sqrt{\left(\frac{1}{V_t}\frac{\partial V_\sigma}{\partial s_{\bar t}}\right)^2}}{\frac{1}{V_t}\frac{\partial V_\sigma}{\partial s_{\bar t}}} \; \frac{\frac{3^2\cdot 4^2}{V_t}\frac{\partial V_\sigma}{\partial s_{\bar t}}}{\sqrt{\frac{3^2 V_{\tau_a}}{V_t}}\,\sqrt{\frac{3^2 V_{\tau_b}}{V_t}}} \right].    (D.5)

Here the V's are squared volumes (V_t = a_t^2 is the squared area) and s is a squared length. Since we only consider spacelike triangles and tetrahedra, all these squared volumes are positive. The above formula can then be simplified to

    \theta_t(\sigma) = \frac{\sqrt{\left(\frac{1}{V_t}\frac{\partial V_\sigma}{\partial s_{\bar t}}\right)^2}}{\frac{1}{V_t}\frac{\partial V_\sigma}{\partial s_{\bar t}}} \, \cosh^{-1}\!\left[ \frac{4^2\,\sqrt{\left(\frac{\partial V_\sigma}{\partial s_{\bar t}}\right)^2}}{\sqrt{V_{\tau_a}}\,\sqrt{V_{\tau_b}}} \right].    (D.6)

Here the squared volume of the 4-simplex, the squared volumes of the tetrahedra and the squared areas of the triangles can all be computed from Eq. (D.1) and Eq. (D.3). Given any simplicial complex K, the Regge action can be defined as

    S_{\mathrm{Regge}} = \sum_{\sigma\subset K} \sum_{t\subset\sigma} a_t \, \theta_t(\sigma),    (D.7)

where a_t are the areas of the triangles t and \theta_t(\sigma) is the dihedral angle at the triangle t.
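In the same spirit, a hedged sketch of Eqs. (D.5)–(D.7): the dihedral angle of Eq. (D.6) is obtained by differentiating the squared 4-simplex volume with respect to the squared length of the opposite segment (here with a central finite difference), and the Regge action of Eq. (D.7) is the sum of a_t θ_t(σ) over triangle/4-simplex pairs. This reuses cayley_menger and simplex_volume_squared from the previous sketch; the helper names, the input layout and the finite-difference step are illustrative assumptions, and the arccosh is real only for a Lorentzian 4-simplex with spacelike tetrahedra, as assumed in the text.

import math
import numpy as np

def lorentzian_v4_squared(sq):
    """Squared volume of the Lorentzian 4-simplex, Eq. (D.1) (prefactor (-1)^4)."""
    return np.linalg.det(cayley_menger(sq)) / (2 ** 4 * math.factorial(4) ** 2)

def restrict(sq, verts):
    """Squared edge lengths of the sub-simplex spanned by `verts`, relabelled 0..k."""
    idx = {v: k for k, v in enumerate(verts)}
    return {(idx[i], idx[j]): s for (i, j), s in sq.items() if i in idx and j in idx}

def dihedral_angle(sq, tri, tet_a, tet_b, opp_edge, eps=1e-6):
    """theta_t(sigma) of Eq. (D.6) for the hinge triangle `tri`, shared by the
    tetrahedra `tet_a` and `tet_b`; `opp_edge` is the segment opposite to `tri`
    (keyed exactly as in `sq`).  All inputs refer to one 4-simplex, vertices 0..4."""
    V_t = simplex_volume_squared(restrict(sq, tri), 2)     # squared area a_t^2
    V_a = simplex_volume_squared(restrict(sq, tet_a), 3)   # squared tetrahedron volumes
    V_b = simplex_volume_squared(restrict(sq, tet_b), 3)
    s0 = sq[opp_edge]
    def v4(s):                                             # V_sigma as a function of s_tbar
        pert = dict(sq)
        pert[opp_edge] = s
        return lorentzian_v4_squared(pert)
    dV = (v4(s0 + eps) - v4(s0 - eps)) / (2 * eps)         # dV_sigma / ds_tbar
    sgn = np.sign(dV / V_t)                                # the sign factor in Eq. (D.6)
    return sgn * np.arccosh(4 ** 2 * abs(dV) / np.sqrt(V_a * V_b))

def regge_action(simplices):
    """S_Regge of Eq. (D.7): sum over 4-simplices and their triangles of a_t * theta_t.
    Each entry of `simplices` is (sq, hinges), where every hinge is a tuple
    (tri, tet_a, tet_b, opp_edge) as accepted by dihedral_angle above."""
    S = 0.0
    for sq, hinges in simplices:
        for tri, tet_a, tet_b, opp_edge in hinges:
            a_t = math.sqrt(simplex_volume_squared(restrict(sq, tri), 2))  # area a_t
            S += a_t * dihedral_angle(sq, tri, tet_a, tet_b, opp_edge)
    return S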
References

[1] E. Witten, A New Look At The Path Integral Of Quantum Mechanics, arXiv:1009.6032.
[2] E. Witten, Analytic Continuation Of Chern-Simons Theory, AMS/IP Stud. Adv. Math. 50 (2011) 347–446, [arXiv:1001.2933].
[3] G. Basar, G. V. Dunne, and M. Unsal, Resurgence theory, ghost-instantons, and analytic continuation of path integrals, JHEP 10 (2013) 041, [arXiv:1308.1108].
[4] G. V. Dunne and M. Unsal, Deconstructing zero: resurgence, supersymmetry and complex saddles, JHEP 12 (2016) 002, [arXiv:1609.05770].
[5] M. Cristoforetti, F. Di Renzo, A. Mukherjee, and L. Scorzato, Quantum field theories on the Lefschetz thimble, PoS LATTICE2013 (2014) 197, [arXiv:1312.1052].
[6] E. Witten, A Note On Complex Spacetime Metrics, arXiv:2111.06514.
[7] F. Conrady and L. Freidel, On the semiclassical limit of 4d spin foam models, Phys. Rev. D78 (2008) 104023, [arXiv:0809.2280].
[8] J. W. Barrett, R. J. Dowdall, W. J. Fairbairn, F. Hellmann, and R. Pereira, Lorentzian spin foam amplitudes: Graphical calculus and asymptotics, Class. Quant. Grav. 27 (2010) 165009, [arXiv:0907.2440].
[9] M. Han and M. Zhang, Asymptotics of Spinfoam Amplitude on Simplicial Manifold: Lorentzian Theory, Class. Quant. Grav. 30 (2013) 165012, [arXiv:1109.0499].
[10] E. Bianchi, E. Magliaro, and C. Perini, LQG propagator from the new spin foams, Nucl. Phys. B 822 (2009) 245–269, [arXiv:0905.4082].
[11] M. Han, On Spinfoam Models in Large Spin Regime, Class. Quant. Grav. 31 (2014) 015004, [arXiv:1304.5627].
[12] M. Han, Z. Huang, H. Liu, and D. Qu, Complex critical points and curved geometries in four-dimensional Lorentzian spinfoam quantum gravity, Phys. Rev. D 106 (2022), no. 4 044005, [arXiv:2110.10670].
[13] S. K. Asante, B. Dittrich, and H. M. Haggard, Effective Spin Foam Models for Four-Dimensional Quantum Gravity, Phys. Rev. Lett. 125 (2020), no. 23 231301, [arXiv:2004.07013].
[14] M. Han and H. Liu, Analytic continuation of spinfoam models, Phys. Rev. D 105 (2022), no. 2 024012, [arXiv:2104.06902].
[15] E. Bianchi, L. Modesto, C. Rovelli, and S. Speziale, Graviton propagator in loop quantum gravity, Class. Quant. Grav. 23 (2006) 6989–7028, [gr-qc/0604044].
[16] E. Bianchi, E. Magliaro, and C. Perini, LQG propagator from the new spin foams, Nucl. Phys. B822 (2009) 245–269, [arXiv:0905.4082].
[17] E. Bianchi and Y. Ding, Lorentzian spinfoam propagator, Phys. Rev. D86 (2012) 104040, [arXiv:1109.6538].
[18] E. Magliaro and C. Perini, Emergence of gravity from spinfoams, EPL 95 (2011), no. 3 30007, [arXiv:1108.2258].
[19] C. Perini, Einstein-Regge equations in spinfoams, J. Phys. Conf. Ser. 360 (2012) 012050, [arXiv:1110.5899].
[20] E. Magliaro and C. Perini, Regge gravity from spinfoams, Int. J. Mod. Phys. D 22 (2013) 1–21, [arXiv:1105.0216].
[21] M. Han, Semiclassical Analysis of Spinfoam Model with a Small Barbero-Immirzi Parameter, Phys. Rev. D 88 (2013) 044051, [arXiv:1304.5628].
[22] P. Dona, M. Han, and H. Liu, Spinfoams and high performance computing, arXiv:2212.14396.
[23] M. Han, Z. Huang, H. Liu, D. Qu, and Y. Wan, Spinfoam on Lefschetz Thimble: Markov Chain Monte-Carlo Computation of Lorentzian Spinfoam Propagator, arXiv:2012.11515.
[24] M. Han, Z. Huang, H. Liu, and D. Qu, Numerical computations of next-to-leading order corrections in spinfoam large-j asymptotics, Phys. Rev. D 102 (2020), no. 12 124010, [arXiv:2007.01998].
[25] F. Gozzini, A high-performance code for EPRL spin foam amplitudes, Class. Quant. Grav. 38 (2021), no. 22 225010, [arXiv:2107.13952].
[26] P. Frisoni, F. Gozzini, and F. Vidotto, Markov Chain Monte Carlo methods for graph refinement in Spinfoam Cosmology, arXiv:2207.02881.
[27] P. Dona and P. Frisoni, How-to Compute EPRL Spin Foam Amplitudes, Universe 8 (2022), no. 4 208, [arXiv:2202.04360].
[28] S. K. Asante, B. Dittrich, and J. Padua-Arguelles, Effective spin foam models for Lorentzian quantum gravity, Class. Quant. Grav. 38 (2021), no. 19 195002, [arXiv:2104.00485].
[29] S. K. Asante, J. D. Simão, and S. Steinhaus, Spin-foams as semi-classical vertices: gluing constraints and a hybrid algorithm, arXiv:2206.13540.
[30] S. K. Asante, B. Dittrich, and S. Steinhaus, Spin foams, Refinement limit and Renormalization, arXiv:2211.09578.
[31] B. Bahr and S. Steinhaus, Numerical evidence for a phase transition in 4d spin foam quantum gravity, Phys. Rev. Lett. 117 (2016), no. 14 141302, [arXiv:1605.07649].
[32] C. Rovelli and L. Smolin, Discreteness of area and volume in quantum gravity, Nuclear Physics B 442 (May, 1995) 593–619.
[33] A. Ashtekar and J. Lewandowski, Quantum theory of geometry. 1: Area operators, Class. Quant. Grav. 14 (1997) A55–A82, [gr-qc/9602046].
[34] M. Han and T. Krajewski, Path Integral Representation of Lorentzian Spinfoam Model, Asymptotics, and Simplicial Geometries, Class. Quant. Grav. 31 (2014) 015009, [arXiv:1304.5626].
[35] V. Bonzom, Spin foam models for quantum gravity from lattice path integrals, Phys. Rev. D 80 (2009) 064028, [arXiv:0905.1501].
[36] J. Engle, W. Kaminski, and J. Oliveira, Addendum to 'eprl/fk asymptotics and the flatness problem', Classical and Quantum Gravity 38 (2021), no. 11 119401.
[37] M. Han, Z. Huang, and A. Zipfel, Spin foam propagator: A new perspective to include the cosmological constant, Phys. Rev. D 97 (2018), no. 8 084055, [arXiv:1711.11162].
[38] M. Han, Einstein Equation from Covariant Loop Quantum Gravity in Semiclassical Continuum Limit, Phys. Rev. D 96 (2017), no. 2 024047, [arXiv:1705.09030].
[39] B. Dittrich and A. Kogios, From spin foams to area metric dynamics to gravitons, arXiv:2203.02409.
[40] J. W. Barrett, M. Rocek, and R. M. Williams, A Note on area variables in Regge calculus, Class. Quant. Grav. 16 (1999) 1373–1376, [gr-qc/9710056].
[41] A. Melin and J. Sjöstrand, Fourier integral operators with complex-valued phase functions, in Fourier Integral Operators and Partial Differential Equations (J. Chazarain, ed.), (Berlin, Heidelberg), pp. 120–223, Springer Berlin Heidelberg, 1975.
[42] L. Hörmander, The Analysis of Linear Partial Differential Operators I, ch. 7, Theorem 7.7.5. Springer-Verlag Berlin, 1983.
[43] F. Conrady and L. Freidel, Path integral representation of spin foam models of 4d gravity, Class. Quant. Grav. 25 (2008) 245010, [arXiv:0806.4640].
[44] J. W. Barrett, R. J. Dowdall, W. J. Fairbairn, H. Gomes, and F. Hellmann, Asymptotic analysis of the EPRL four-simplex amplitude, J. Math. Phys. 50 (2009) 112504, [arXiv:0902.1170].
[45] M.-X. Han and M. Zhang, Asymptotics of Spinfoam Amplitude on Simplicial Manifold: Euclidean Theory, Class. Quant. Grav. 29 (2012) 165004, [arXiv:1109.0500].
[46] P. Dona, M. Fanizza, G. Sarno, and S. Speziale, Numerical study of the Lorentzian Engle-Pereira-Rovelli-Livine spin foam amplitude, Phys. Rev. D100 (2019), no. 10 106003, [arXiv:1903.12624].
[47] M. Kapovich and J. J. Millson, The symplectic geometry of polygons in Euclidean space, Journal of Differential Geometry 44 (1996), no. 3 479–513.
[48] C. Rovelli and S.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Speziale, A Semiclassical tetrahedron, Class.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Quant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Grav.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 23 (2006) 5861–5870, [gr-qc/0606074].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [49] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Dona and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Speziale, Asymptotics of lowest unitary SL(2,C) invariants on graphs, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' D 102 (2020), no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 8 086016, [arXiv:2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='09089].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [50] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Asante, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Dittrich, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Haggard, Discrete gravity dynamics from effective spin foams, Class.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Quant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Grav.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 38 (2021), no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 14 145023, [arXiv:2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='14468].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [51] H.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Liu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='com/LQG-Florida-Atlantic-University/extended_spinfoam, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [52] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Qu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='com/dqu2017/ 4-simplex-amplitude-and-effective-dynamics-on-double-Delta3-complex, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [53] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Lefschetz, The Picard-Lefschetz Theory, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 135–148.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Springer New York, New York, NY, 1975.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [54] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Alexandru, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Basar, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Bedaque, and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Warrington, Complex Paths Around The Sign Problem, arXiv:2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='05436.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [55] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Ashtekar, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Pawlowski, and P.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Singh, Quantum Nature of the Big Bang: Improved dynamics, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' D74 (2006) 084003, [gr-qc/0607039].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' – 38 – [56] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Ashtekar, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Olmedo, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Singh, Quantum Transfiguration of Kruskal Black Holes, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 121 (2018), no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 24 241301, [arXiv:1806.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='00648].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [57] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Qu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='com/dqu2017/Numerical-Asymtotics, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [58] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Tate and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Visser, Realizability of the Lorentzian (n,1)-Simplex, JHEP 01 (2012) 028, [arXiv:1110.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='5694].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [59] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Tate and M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Visser, Fixed-topology lorentzian triangulations: Quantum regge calculus in the lorentzian domain, Journal of High Energy Physics 2011 (2011) 1–32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [60] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Asante, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Dittrich, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Padua-Arg¨uelles, Complex actions and causality violations: Applications to Lorentzian quantum cosmology, arXiv:2112.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='15387.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [61] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Dittrich, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Gielen, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Schander, Lorentzian quantum cosmology goes simplicial, Class.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Quant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Grav.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 39 (2022), no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' 3 035012, [arXiv:2109.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content='00875].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' [62] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Dittrich, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Freidel, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Speziale, Linearized dynamics from the 4-simplex Regge action, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNE1T4oBgHgl3EQfHwN5/content/2301.02930v1.pdf'} +page_content=' Rev.' 
diff --git a/SNFAT4oBgHgl3EQf1h4R/content/tmp_files/2301.08709v1.pdf.txt b/SNFAT4oBgHgl3EQf1h4R/content/tmp_files/2301.08709v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..546d50ec25409cbe7691932c3f075617702dd2fa
--- /dev/null
+++ b/SNFAT4oBgHgl3EQf1h4R/content/tmp_files/2301.08709v1.pdf.txt
@@ -0,0 +1,2059 @@
Fermion Soliton Stars

Loris Del Grosso,1,2 Gabriele Franciolini,1,2 Paolo Pani,1,2 and Alfredo Urbano1,2
1 Dipartimento di Fisica, Sapienza Università di Roma, Piazzale Aldo Moro 5, 00185, Roma, Italy
2 INFN, Sezione di Roma, Piazzale Aldo Moro 2, 00185, Roma, Italy
(Dated: January 23, 2023)

A real scalar field coupled to a fermion via a Yukawa term can evade no-go theorems preventing solitonic solutions. For the first time, we study this model within General Relativity without approximations, finding static and spherically symmetric solutions that describe fermion soliton stars. The Yukawa coupling provides an effective mass for the fermion, which is key to the existence of self-gravitating relativistic solutions. We systematically study this novel family of solutions and present their mass-radius diagram and maximum compactness, which is close to (but smaller than) that of the corresponding Schwarzschild photon sphere. Finally, we discuss the ranges of the parameters of the fundamental theory in which the latter might have interesting astrophysical implications, including compact (sub)solar and supermassive fermion soliton stars for a standard gas of degenerate neutrons and electrons, respectively.

CONTENTS

I. Introduction
II. Setup
   A. Thomas-Fermi approximation
   B. Dimensionless equations of motion and boundary conditions
III. Some preliminary theoretical considerations
   A. On the crucial role of fermions for the existence of solitonic stars
   B. Scaling of the physical quantities in the µR ≫ 1 regime
   C. Energy conditions
IV. Numerical results
   A. Numerical strategy
   B. Fermion soliton stars
   C. On the existence of a Newtonian regime
V. Parameter space and astrophysical implications
VI. Conclusions
Acknowledgments
A. Connection with scalar-tensor theories
References

I. INTRODUCTION

Solitonic solutions play a crucial role in many field theories, in particular in General Relativity. In the context of the latter, starting from Wheeler’s influential idea of geons [1], considerable attention has been devoted to finding minimal models that allow for self-gravitating solitonic solutions [2]. The prototypical example is that of boson stars [3–5] (and of their Newtonian analog, Q-balls [6]), which are self-gravitating solutions to Einstein-Klein-Gordon theory with a complex and massive scalar field (see [7–9] for some reviews). If the scalar field is real, no-go theorems prevent the existence of solitonic solutions for very generic classes of scalar potential [10, 11].
Indeed, +Einstein-Klein-Gordon theory contains time-dependent +solutions known as oscillatons which, however, decay in +time [12]. +About forty years ago, Lee and Pang proposed a model +in which a real scalar field with a false-vacuum potential +is coupled to a massive fermion via a Yukawa term [13]. +Working in a thin-wall limit in which the scalar field +is a step function, for certain parameters of the model +they obtained approximated solutions describing fermion +soliton stars. +The scope of this paper is twofold. On the one hand we +show that fermion soliton stars exist in this model also +beyond the thin-wall approximation, and we build exact +static solutions within General Relativity. On the other +hand, we elucidate some key properties of the model, in +particular the role of the effective fermion mass provided +by the Yukawa coupling. Then, we explore the model +systematically, presenting mass-radius diagrams and the +maximum compactness of fermion soliton stars for various +choices of the parameters, showing that in this model a +standard gas of degenerate neutrons (resp. electrons) can +support stable (sub)solar (resp. supermassive) fermion +soliton stars with compactness comparable to that of +ordinary neutron stars. This analysis paves the way for a +detailed study of the phenomenology of fermion soliton +stars as a motivated model of exotic compact objects [14]. +Finally, in Appendix A, we explore the connection of the +model to a very peculiar scalar-tensor theory. +We use the signature (−, +, +, +) for the metric, adopt +natural units (ℏ = c = 1) and define the Planck mass +through G = m−2 +p . +arXiv:2301.08709v1 [gr-qc] 20 Jan 2023 + +2 +II. +SETUP +We consider a theory in which Einstein gravity is mini- +mally coupled to a real scalar field φ and a fermion field +ψ. The action can be written as [13] +S = +� +d4x√−g +� +R +16πG − 1 +2∂µφ∂µφ − U(φ) ++ ¯ψ(iγµDµ − mf)ψ + fφ ¯ψψ +� +, +(1) +where the scalar potential is +U(φ) = 1 +2µ2φ2� +1 − φ +φ0 +�2 +, +(2) +and features two degenerate minima at φ = 0 and φ = φ0. +The constant µ (resp. mf) is the mass of the scalar (resp. +fermion). The Yukawa interaction is controlled by the +coupling f. It should be noted that Eq. (1) describes the +action of a local field theory and, therefore, we expect that +all physics derived from it will naturally respect causality +conditions (that, on the contrary, could be violated in +the absence of such underlying formulation). Also, we +point out that the matter Lagrangian in Eq. (1) describes +a renormalizable field theory; this is in contrast to the +widely used model describing solitonic boson stars [15–18] +in which the scalar potential is non-renormalizable and +field values should not exceed the limit of validity of the +corresponding effective field theory. The covariant deriva- +tive Dµ in Eq. (1) takes into account the spin connection +of the fermionic field. +From the quadratic terms in the fermion Lagrangian, +it is useful to define an effective mass, +meff = mf − fφ. +(3) +We will focus on scenarios in which the fermion becomes +effectively massless (i.e. meff = 0) when the scalar field +sits on the second degenerate vacuum, φ = φ0. This +condition implies fixing +f = mf +φ0 +. +(4) +As we shall discuss, we are mostly interested in configu- +rations where the scalar field makes a transition between +the false1 vacuum (φ ≈ φ0) to the true vacuum (φ ≈ 0). +A. +Thomas-Fermi approximation +The description of a fermionic field in Eq. 
(1) requires +treating the quantization of spin-1/2 particles in curved +1 Although the minima at φ = 0 and φ = φ0 are degenerate, +we shall call them true and false vacuum, respectively, having +in mind the generalization in which the potential U(φ) can be +nondegenerate, i.e. U(φ0) ̸= U(0), see Fig. 1 below. +spacetime. The problem can be simplified significantly +within the Thomas-Fermi approximation2, relying on the +following assumptions: +i) the gravitational and scalar fields are slowly varying +functions with respect to the fermion dynamics, +they do not interact directly with the (microscopic) +fermionic field ψ, but with average macroscopic +quantities (mean-field approximation); +ii) the fermion gas is at equilibrium so that all the +macroscopic quantities are time independent. +In practice, one can divide the entire three-space into +small domains which are much larger than the de Broglie +wavelength of the typical fermion, but sufficiently small +that the gravitational and scalar fields are approximately +constant inside each domain. Then, every domain is filled +with a degenerate (i.e. the temperature is much smaller +than the chemical potential) Fermi gas, in such a way that +the Fermi distribution is approximated by a step function, +nk = θ(kF − k), where kF(xµ) is the Fermi momentum +observed in the appropriate local frame. +The energy density of the fermion gas reads +W = +2 +(2π)3 +� kF +0 +d3k ϵk, +(5) +where ϵk = +� +k2 + m2 +eff. Notice that W = W(xµ) through +the spacetime dependence of kF. In an analogous way, we +obtain the fermion gas pressure P and the scalar density +S = ⟨ ¯ψψ⟩ as +P = +2 +(2π)3 +� kF +0 +d3k k2 +3ϵk +, +(6) +S = +2 +(2π)3 +� kF +0 +d3k meff +ϵk +. +(7) +It it easy to show that these quantities satisfy the identity +W − 3P = meffS. +(8) +In the Thomas-Fermi approximation, the fermions enter +Einstein’s equations as a perfect fluid characterised by an +energy-momentum tensor of the form +T [f] +µν = (W + P)uµuν + Pgµν, +(9) +while they also enter the scalar field equation through the +scalar density S. Indeed, by varying the action in Eq. (1) +with respect to φ, we obtain a source term of the form +≈ f ¯ψψ. Within the Thomas-Fermi approximation, this +becomes +f ¯ψψ → f⟨ ¯ψψ⟩ ≡ fS, +(10) +which is consistent with the fact that, in the fluid descrip- +tion, the scalar field equation couples to fermions through +a term proportional to the trace (T [f])µ +µ = −W + 3P. +2 We point the interested reader to Appendix A of Ref. [13] for +a complete derivation of the Thomas-Fermi approximation in +curved spacetime, while here we summarise the main properties. + +3 +1. +Equations of motion +It is now possible to write down the equations of motion +for our theory in covariant form +Gµν = 8πG Tµν, +□φ − ∂U +∂φ + fS = 0, +(11) +where +Tµν = −2 +� ∂Lφ +∂gµν − 1 +2gµνLφ +� ++ T [f] +µν , +(12) +in which Lφ is the Lagrangian density of the scalar field. +In order to close the system, we need an equation de- +scribing the behaviour of kF. This is obtained by mini- +mizing the energy of the fermion gas at fixed number of +fermions [13]. +From now on, for simplicity, we will consider spherically +symmetric equilibrium configurations, whose background +metric can be expressed as +ds2 = −e2u(ρ)dt2 +e2v(ρ)dρ2 +ρ2(dθ2 +sin2 θdϕ2), (13) +in terms of two real metric functions u(ρ) and v(ρ). Fur- +thermore, we will assume that the scalar field in its equilib- +rium configuration is also static and spherically symmetric, +φ(t, ρ, θ, ϕ) = φ(ρ). 
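Before specializing the field equations to this ansatz, it is useful to note that the gas quantities introduced above are straightforward to evaluate and cross-check numerically. The following minimal Python sketch computes W, P and S of Eqs. (5)-(7) by direct quadrature for a given Fermi momentum and effective mass, and verifies the identity W - 3P = meff S of Eq. (8); the numerical values chosen for f*phi and kF are arbitrary illustrations (in units of the bare fermion mass), not values used in the paper.

import numpy as np
from scipy.integrate import quad

def gas_quantities(k_fermi, m_eff):
    # Degenerate-gas energy density W, pressure P and scalar density S of
    # Eqs. (5)-(7); the overall 1/pi^2 combines the spin degeneracy 2/(2 pi)^3
    # with the angular integral 4 pi.
    eps = lambda k: np.sqrt(k**2 + m_eff**2)
    W, _ = quad(lambda k: k**2 * eps(k) / np.pi**2, 0.0, k_fermi)
    P, _ = quad(lambda k: k**4 / (3.0 * eps(k) * np.pi**2), 0.0, k_fermi)
    S, _ = quad(lambda k: k**2 * m_eff / (eps(k) * np.pi**2), 0.0, k_fermi)
    return W, P, S

# arbitrary example, in units of the bare fermion mass: f*phi = 0.8, k_F = 0.5
m_eff = 1.0 - 0.8
W, P, S = gas_quantities(0.5, m_eff)
print(W - 3.0 * P, m_eff * S)   # the two numbers agree to quadrature accuracy

The same routine can also be checked against the closed-form expressions given below in Eqs. (21a)-(21c).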
Being the spacetime static and spher- +ically symmetric, kF = kF(ρ) can only be a function of +the radial coordinate. +2. +Fermi momentum equation +In the Thomas-Fermi approximation the fermion gas +energy can be written as [13] +Ef = 4π +� +dρ ρ2 eu(ρ)+v(ρ) W, +(14) +while the number of fermions is +N = 4 +3π +� +dρ ρ2ev(ρ)kF(ρ). +(15) +To enforce a constant number of fermions, we introduce +the Lagrangian multiplier ωF and define the functional +E′ +f[kF] = Ef[kF] − ωF +� +N[kF] − Nfixed +� +, +(16) +which is minimized by imposing the condition +δE′ +f[kF] +δkF(ρ) = 0. +(17) +One directly obtains ϵF = e−uωF, where ϵF = ϵkF is +the Fermi energy. Thus, ωF coincides with the Fermi +energy in flat spacetime while it acquires a redshift factor +otherwise. Finally, we find +k2 +F(ρ) = ω2 +Fe−2u(ρ) − (mf − fφ(ρ))2 . +(18) +B. +Dimensionless equations of motion and +boundary conditions +In order to simplify the numerical integrations, as well +as physical intuition, it is convenient writing the field +equations in terms of dimensionless quantities. To this +end, we define +x = kF +mf +, +y = φ +φ0 +, +r = ρµ. +(19) +Therefore, the potential and kinetic terms become +U = µ2φ2 +0 +�1 +2y2(1 − y)2 +� +≡ µ2φ2 +0 ˜U(y), +V = µ2φ2 +0 +�1 +2e−2v(r)(∂ry)2 +� +≡ µ2φ2 +0 ˜V (y), +(20) +while Eqs. (5)-(7) can be computed analytically as +W = +2 +(2π)3 +� kF(ρ) +0 +d3k +� +k2 + (mf − fφ(ρ))2 = m4 +eff +8π2 +� +s +� +1 + s2(1 + 2s2) − log +� +s + +� +s2 + 1 +�� +≡ m4 +f ˜W(x, y), (21a) +P = +2 +(2π)3 +� kF(ρ) +0 +d3k k2 +3 +� +k2 + (mf − fφ(ρ))2 = m4 +eff +8π2 +� +s +�2 +3s2 − 1 +� � +1 + s2 + log +� +s + +� +s2 + 1 +�� +≡ m4 +f ˜P(x, y), (21b) +S = +2 +(2π)3 +� kF(ρ) +0 +d3k +mf − fφ(ρ) +� +k2 + (mf − fφ(ρ))2 = m3 +eff +2π2 +� +s +� +1 + s2 − log +� +s + +� +s2 + 1 +�� +≡ m3 +f ˜S(x, y), +(21c) +where ˜W, ˜P, ˜S are dimensionless quantities and we intro- +duced s ≡ x/(1 − y) for convenience. Remarkably, these +expressions are the same as in the standard case of a +minimally coupled degenerate gas with the substitution + +4 +mf → meff. +As we shall discuss in Appendix A, this property will be +important when comparing this model to a scalar-tensor +theory. Note that the massless limit, meff → 0, should +be taken carefully as not all the dependence on meff is +expressed in the dimensional prefactor. By performing +the first integrals in Eqs. (21a)-(21c) in the meff → 0 limit, +we obtain W = P/3, as expected for an ultrarelativistic +degenerate gas. +It is convenient to further introduce the dimensionless +combination of parameters +Λ = +√ +8πφ0 +mp +, +η = +mf +µ1/2φ1/2 +0 +. +(22) +Finally, the field equations (i.e. the Einstein-Klein-Gordon +equations with the addition of the Fermi momentum +equation) take the compact form +e−2v − 1 − 2e−2vr∂rv = −Λ2r2 � +η4 ˜W + ˜U + ˜V +� +, +e−2v − 1 + 2e−2vr∂ru = Λ2r2 � +η4 ˜P − ˜U + ˜V +� +, +e−2v� +∂2 +ry + +� +∂ru − ∂rv + 2 +r +� +∂ry +� += ∂ ˜U +∂y − η4 ˜S, +x2 = ˜ω2 +Fe−2u(r) − (1 − y)2, +(23) +where ˜U, ˜V , ˜P, ˜W, and ˜S depend on x, y, and r, and +we also introduced ˜ωF = ωF/mf. Static and spherically +symmetric configurations in the model (1) are solutions to +the above system of ordinary differential equations. For +clarity, we summarise the relevant parameters in Table I. +1. +Absence of φ = const solutions +Note that, because U = 0 = dU/dφ in both degen- +erate vacua, it is natural to first check what happens +when φ = φ0 = const or if φ = 0. The former case (i.e. 
+y(ρ) = 1) is an exact solution of the scalar equation and +reduces the Einstein’s equations to those of gravity cou- +pled to a degenerate gas of massless (since meff(φ0) = 0) +fermions. In this case, self-gravitating solutions do not +have a finite radius [19]. On the other hand, due to the +Yukawa coupling, in the presence of a fermion gas φ = 0 +is not a solution to the scalar field equation. +Thus, self-gravitating solutions to this model must have +a nonvanishing scalar-field profile. In particular, we will +search for solutions that (approximately) interpolate be- +tween these two vacuum states. +2. +Boundary conditions at ρ = 0 +Regularity at the center of the star (ρ = 0) imposes the +following boundary conditions +v(r = 0) = 0, +u(r = 0) = 0, +y(r = 0) = 1 − ϵ, +∂ry(0) = 0, +TABLE I: List of the model parameters, the fermion +soliton star parameters, and the dimensionless quantities +adopted to express the system of equations in compact +form. Due to the condition in Eq. (4), in our case only +three model parameters are independent. +Model parameters +µ +Scalar field mass +φ0 +VEV of the false vacuum +mf +Fermion mass +f +Yukawa coupling +Solution parameters (boundary conditions) +Pc +Fermion central pressure +ϵ = 1 − φ/φ0 +Central scalar field displacement +Dimensionless parameters/variables +Λ = +√ +8πφ0/mp +Dimensionless VEV of the false vacuum +η = mf/√µφ0 +Scale ratio +x = kF/mf +Fermi momentum +y = φ/φ0 +Scalar field +r = ρµ +Rescaled radius +˜P(r = 0) = ˜Pc, +(24) +where ϵ > 0 will be fixed numerically through a shooting +procedure in order to obtain asymptotic flatness. +In practice, in a large region of the parameter space +one obtains ϵ ≪ 1. In this limit, using Eq. (21b) and +Eq. (18), we find +˜ωF ≡ ωF +mf += (12π2 ˜Pc)1/4+ +3 +4(12π2 ˜Pc)1/4 ϵ2+O(ϵ3). (25) +In general, ˜ωF is fixed in terms of the central values of +the pressure and scalar field. +Finally, since a shift u(ρ) → u(ρ) + const in Eq. (23) +merely corresponds to a shift of the fermionic central +pressure, we have imposed u(ρ = 0) = 0 without loss of +generality. +3. +Definitions of mass, radius, and compactness +We define the mass of the object as +M = m(ρ → +∞) +G +, +(26) +where the function m(ρ) is related to the metric coefficient +v(ρ) by e2v(ρ) = 1 − 2m(ρ)/ρ, and can be interpreted as +the mass-energy enclosed within the radius ρ. In terms +of the dimensionless variables introduced in Eq. (19), it +is convenient to define ˜m(r) = µm(ρ). Thus, one obtains +µM +m2p += ˜m(r). +(27) + +5 +Notice that, in the asymptotic limit r → ∞, Eq. (27) +becomes independent of the radius. +Typically, the radius of a star is defined as the value of +the radial coordinate at the point where pressure drops to +zero. As we shall discuss, in our case the fermion soliton +stars will be characterised by a lack of a sharp boundary. +Analogously to the case of boson stars [9], one can define +an effective radius R within which 99% of the total mass +is contained. (As later discussed, we shall also define the +location Rf where only the pressure of the fermion gas +vanishes.) Finally, we can define the compactness of the +star as GM/R. +III. +SOME PRELIMINARY THEORETICAL +CONSIDERATIONS +Before solving the full set of field equations numerically, +in this section we provide some theoretical considerations +that might be useful to get a physical intuition of the +model. +A. +On the crucial role of fermions for the existence +of solitonic stars +1. 
+Classical mechanics analogy +In order to understand why the presence of fermions in +this theory plays a crucial role for the existence of station- +ary solutions, it is useful to study a classical mechanics +analogy for the dynamics of the scalar field [6]. +For the moment we consider flat spacetime. Further- +more, we start by ignoring the fermions (we will relax +this assumption later on). The set of Eqs. (23) drastically +simplifies to a single field equation +∂2 +ρφ + 2 +ρ∂ρφ − ∂U +∂φ = 0. +(28) +To make the notation more evocative of a one-dimensional +mechanical system, we rename +ρ → t, +φ(ρ) → φ(t), +ˆU := −U, +(29) +in such a way that the equation of motion becomes +φ′′(t) = −∂ ˆU +∂φ − 2 +t φ′(t), +(30) +which describes the one-dimensional motion of a parti- +cle with coordinate φ(t) in the presence of an inverted +potential, ˆU, and a velocity-dependent dissipative force, +−(2/t)φ′(t). Within this analogy, the boundary (or initial) +conditions (24) simply become +φ(t = 0) = φ0 − δφ, +φ′(t = 0) = 0, +(31) +where φ0 is the position of the false vacuum and δφ = ϵφ0. +As we impose zero velocity at t = 0, the initial energy is +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +1.2 +-3 +-2 +-1 +0 +FIG. 1: Inverted potential with degeneracy (blue line, our +case) and without degeneracy between vacua (orange). +E(0) = ˆU(φ0 − δφ). The energy E(t) of the particle at a +time t is obtained by subtracting the work done by the +friction: +E(t) − E(0) = L(t), +(32) +where +L(t) = −2 +� t +0 +dt′ ˙φ2(t′) +t′ +. +(33) +Note that, owing to the initial conditions, this integral +is regular at t = 0. On the other hand, the existence of +a solution with asymptotically zero energy requires the +particle to arrive with zero velocity at φ = 0 for t → +∞. +Therefore, we impose E(t → ∞) = 0. As the total energy +loss due to friction is L(t → ∞), the latter condition +means +E(0) = −L(t → ∞) +(34) +that is +ˆU(φ0 − δφ) = 2 +� ∞ +0 +dt′ ˙φ2(t′) +t′ +. +(35) +This equation can be interpreted as an equation for δφ +in order to allow for the existence of a "bounce" solution3. +One can demonstrate the existence of such a solution +heuristically. Let us first consider a slightly modified ver- +sion of the inverted potential without degeneracy (orange +plot in Fig. 1). Obviously, if the motion starts exactly +at φ0 with zero velocity, the particle would remain at +3 A bounce solution is the one reaching asymptotically the true +vacuum with zero energy, after having "bounced" at the minimum +of the inverted potential. + +6 +rest. However, if we start on the left of the maximum +the particle will roll down, bounce, and eventually climb +the leftmost hill shown in Fig. 1. Now, if the dynamics +starts too far from φ0 (still on the left of the maximum), +with zero initial velocity it might not have enough energy +to reach the zero-energy point at φ = 0. Similarly, if +the dynamics starts too close to φ0, the particle might +reach φ = 0 with positive energy and overcome the hill +rolling up to φ → −∞. By continuity, there must exist a +unique point such that the total energy loss due to friction +compensates the initial gap of energy with respect to the +energy of φ = 0. +However, by applying the same argument to our degen- +erate case (blue curve in Fig. 1), it is easy to see that there +is no solution to Eq. (35) 4. This is because the energy +loss due to friction is nonzero, so the particle will never +reach φ = 0 and is doomed to roll back in the potential +eventually oscillating around the minimum of ˆU. 
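This friction argument is easy to confirm numerically. The following minimal sketch (in units µ = φ0 = 1, working with the rescaled field y = φ/φ0; the grid of central displacements ϵ and the integration range are arbitrary choices, not taken from the paper) integrates Eq. (30) for the degenerate potential and shows that, for any starting point y(0) = 1 − ϵ, the trajectory never reaches the true vacuum y = 0 and ends up trapped around the minimum of the inverted potential at y = 1/2.

import numpy as np
from scipy.integrate import solve_ivp

def dU_dy(y):
    # U(y) = y^2 (1 - y)^2 / 2 in units mu = phi0 = 1
    return y * (1.0 - y) * (1.0 - 2.0 * y)

def rhs(t, state):
    # Eq. (30): y'' = dU/dy - (2/t) y'
    y, yp = state
    return [yp, dU_dy(y) - 2.0 * yp / t]

for eps in (1e-1, 1e-2, 1e-3):
    # start at a small but finite t to avoid the 2/t friction term at t = 0
    sol = solve_ivp(rhs, (1e-3, 300.0), [1.0 - eps, 0.0],
                    max_step=0.05, rtol=1e-10, atol=1e-12)
    y = sol.y[0]
    # min(y) stays strictly positive: the field falls back and oscillates
    # around y = 1/2, so it never reaches the true vacuum y = 0
    print(f"eps = {eps:.0e}:  min y = {y.min():.4f}  (never reaches 0)")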
This +shows that, in the degenerate case considered in this work, +a simple scalar model does not allow for bounce solutions +in flat spacetime. +If we now reintroduce fermions in the theory, the scalar +field equation reads (still in flat spacetime) +φ′′(t) = −∂ ˆU +∂φ − 2 +t φ′(t) − fS. +(36) +Since S ≥ 0, the fermions act with a force pushing our +particle towards the origin, potentially giving the right +kick to allow the particle reaching φ = 0 asymptotically. +As we shall see, this also requires S = 0 (i.e., no fermions) +around the origin, in order for the particle to reach a +stationary configuration at φ = 0. +This simple analogy shows how the presence of the +fermions is fundamental as it allows the solution to exist. +In the following section we will show how this is realised +in the full theory which includes gravitational effects. +Furthermore, we will show that, in certain regions of the +parameter space, relativistic effects are in fact crucial for +the existence of the solution, since the latter requires a +minimum fermionic pressure to exist. +2. +Evading the no-go theorem for solitons +The above conclusions, deduced from our simple heuris- +tic picture, holds also in the context of General Relativity. +Indeed, without fermions in the system of Eqs. (23), and +since our potential (2) is nonnegative, a general theorem +proves that no axially symmetric and stationary solitons +(that is asymptotically flat, localized and everywhere reg- +ular solutions) can exist [10, 11]. +However, the presence of fermions evades one of the +hypotheses of the theorem. As we will show, in this case +4 At least if we look for a solution in which the scalar field does +the transition at a finite time. +stationary solitons generically exist also for a real scalar +field (at variance with the case of boson stars, that require +complex scalars) and for a wide choice of the parameters. +B. +Scaling of the physical quantities in the µR ≫ 1 +regime +Assuming µR ≫ 1, it is possible to derive an analytical +scaling for various physical quantities, as originally derived +in Ref. [20] and similar in spirit to Landau’s original +computation for ordinary neutron stars (see, e.g., [19]). +It is instructive to consider the following toy model +in the absence of gravity. We consider a theory with +an additive quantum number N, brought by a spin-1/2 +field ψ. We then add a real scalar field φ with the usual +potential described in Eq. (2). Such a scalar field obeys +Eq. (28) along with the initial condition (31). +Since +µR ≫ 1, its solution is well approximated by a stiff Fermi +function [13],[20] +φ(ρ) ≈ +φ0 +1 + eµ(ρ−R) . +(37) +The definition of kF is (we take Eq. (18) with u = 0 since +we work in absence of gravity) +k2 +F(ρ) = ω2 +F − (mf − fφ(ρ))2 . +(38) +Because of Eq. (37), the Fermi momentum is nearly fixed +to the constant value ωF for ρ ≲ R, and for ρ ≈ R it goes +to zero stiffly. Therefore, the field ψ is approximately +confined within the sphere of radius R. We assume that +the quanta of ψ are noninteracting, massless (consistently +with the fact that we are interested in configurations in +which the fermions are approximately massless in the +core of the star) and described by Fermi statistics at zero +temperature. Thus, we obtain the standard relation for +the particle density +n = #particles +unit.volume = +2 +8π3 +� kF +0 +4πk2dk = ω3 +F +3π2 . +(39) +Since kF ≃ ωF = const, the total number of particles is +N = n +� R +0 +4πρ2dρ = 4 +9π (RωF)3. 
+(40) +The fermion energy is +Ef = +� R +0 +4πρ2dρ W = (3π)1/3�3 +4N +�4/3 1 +R, +(41) +where +W = +energy +unit.volume = +2 +8π3 +� kF +0 +4πk2dk · k = ω4 +F +4π2 . +(42) +The energy associated to the scalar field φ is instead +Es = +� R +0 +4πρ2dρ (U + V ) ≃ +�1 +6µφ2 +0 +� +4πR2 , +(43) + +7 +TABLE II: Analytical scalings of the some physical quan- +tities at the maximum mass Mc in the µR ≫ 1 limit. +Mass +µMc/m2 +p ∼ 1/Λ2 +Radius +µRc ∼ µMc/m2 +p ∼ 1/Λ2 +˜ωF +˜ωc +F ∼ (µ/mp)1/2/(φ0/mf) ∼ Λ1/2/η +Central pressure +˜Pc ∼ ˜ω4 +F ∼ Λ2/η4 +where we have used that fact that +12 +µφ2 +0 +U ≃ 12 +µφ2 +0 +V ≃ δ(ρ − R) , +(44) +which can be shown using Eq. (37) and µR ≫ 1. +The total energy of our configuration is +E = Ef + Es, +(45) +while the radius can be found by imposing ∂E/∂R = 0, +yielding +R = +� 3 +4π (3π)1/3�3 +4N +�4/3�1/3� 1 +µφ2 +0 +�1/3 +(46) +and the mass +M = E(R) = 12πR2�1 +6µφ2 +0 +� +. +(47) +From Eq. (46),(47), we get +R ∼ N 4/9 +M ∼ N 8/9 . +(48) +Thus, at least for large N, the mass of the soliton is lower +than the energy of the sum of N free particles, ensuring +stability. +In the absence of gravity, M can be arbitrarily large. +However, due to relativistic effects we expect the existence +of a maximum mass beyond which the object is unstable +against radial perturbations. +We expect that gravity +becomes important when 2GM/R ∼ 1. Therefore, the +critical mass Mc can be estimated by simply imposing +R ∼ 2GMc in Eq. (47), yielding G2Mc ∼ 1/µφ2 +0 and thus +µMc +m2p +∼ 1 +Λ2 . +(49) +Likewise, one can obtain the scaling of all other relevant +quantities, which we collect in Table II. +1. +Self-consistency criteria +When deducing the scaling reported in Table II, we +made the following assumptions: +i) µR ≫ 1; +ii) a gas of massless fermions in the interior of the star. +In practice, the first assumption is not restrictive. Indeed, +since µ−1 is the Compton wavelength of the scalar boson, +in the context of a classical field theory we should always +impose µR ≫ 1. In other words, if µR ≃ 1 the quantum +effects of the scalar field become important on the scale of +the star and one cannot trust the classical theory anymore. +The hypothesis µR ≫ 1 is an essential ingredient in order +to approximate the scalar field profile with Eq. (37), and +to assume, as a consequence, that the kF is a step function. +Besides, it guarantees that the energy density of the scalar +field is near to a delta function. Using the scaling reported +in Table II, condition i) implies Λ ≪ 1. +One may worry that the second assumption can be +violated, since the scalar field is not located exactly at +φ0 in the origin ρ = 0, and therefore fermions are never +exactly massless. It is enough checking that the fermion +gas is very close to be a massless gas. Let us recall that +the effective mass of the fermion is defined as +meff(ρ) = mf +� +1 − φ(ρ) +φ0 +� +(50) +and therefore meff(ρ = 0) = mfϵ. We can say that the +fermion gas is effectively massless when W/P = 3. From +Eq. (5) and (6), at the lowest order in ϵ one obtains +W +P = 3 +� +1 + +2m2 +fϵ2 +k2 +F +� ++ O(ϵ3), +(51) +which indicates we should require +2m2 +fϵ2 +k2 +F +≪ 1 +(52) +in the vicinity of the origin at ρ ≃ 0. At larger radii, +the scalar field gradually moves away from the central +configurations and fermions start retaining a bare mass. +Inserting Eq. (38) in the previous condition and using +Eq. (25), provides the condition we need to enforce to +obey assumption ii), i.e. +2m2 +fϵ2 +(12π2Pc)1/2 ≪ 1. 
+(53) +We express ϵ using the scalar field profile approximation +in Eq. (37). Indeed, with simple manipulations, one finds +− log ϵ = µR ≫ 1. +(54) +Substituting (54) in (53), and neglecting, at this stage, +the numerical factors one obtains +log +� +mf +P 1/4 +c +� +≪ µR. +(55) +Using the scaling relations in Table II, we obtain +log +� +η +Λ1/2 +� +≪ 1 +Λ2 . +(56) + +8 +Summing up, the following conditions on the parame- +ters +Λ ≪ 1, +(57) +log +� +η +Λ1/2 +� +≪ 1 +Λ2 +(58) +are our self-consistency criteria to check if we are in +a regime in which the scaling reported in Table II are +expected to be valid. While it can be shown that the +second condition implies the first, we prefer writing both +for the sake of clarity. +Notice that, for fixed Λ ≪ 1, +one can violate (58) for increasing values of η, but only +logarithmically. +2. +Confining and deconfining regimes +An important consequence of the scalings collected in +Table II is that the critical mass and radius are inde- +pendent on η at fixed Λ. We shall call the region of the +parameters space where this happens the confining regime +of the solutions. Indeed, in this regime the size of the +soliton is dictated by the parameters of the scalar field, i.e. +µ and φ0, regardless of the value of the fermion mass mf. +Physically, we expect that this would be the case when +there exists a hierarchy between the scalar and fermion +parameters. Since this hierarchy is measured by η, we +expect that the confining regime exists only when η is +larger than a critical value, ηc. +To better clarify this point, we consider again Eq. (18) +for the Fermi momentum, +k2 +F(ρ) = ω2 +Fe−2u(ρ) − mf +� +1 − φ(ρ) +φ0 +�2 +. +(59) +In the mf → 0 limit this quantity becomes positive defi- +nite and so the fermionic pressure cannot vanish at any +finite radius. In other words, the radius of the star can +be arbitrarily large, provided that mf is sufficiently small. +This is nothing but the well-known fact that a star made +of purely relativistic gas does not exist. +Hence, if we enter a regime where the fermion bare +mass mf is so small that, even after the scalar field has +moved away from the false vacuum (where the effective +fermion mass is small by construction), the Fermi gas is +still relativistic, then the radius of the star grows fast and +a small variation in mf produces a big variation in the +radius. We call this regime the deconfining regime of the +solution. +In terms of the dimensionless variables defined above, +the mf → 0 limit becomes +˜ωF → ∞. +(60) +Therefore, we expect that, for a given choice of (Λ, η), +the confining regime exists only if ˜ωc +F is smaller then a +certain value. Using the scaling for ˜ωc +F in Table II, this +can be translated into the condition +Λ1/2 +η +< C, +(61) +where C is a constant that has to be determined numeri- +cally. +At this point, it is natural to define ηc as the value of +η in which Eq. (61) is saturated. In this way, Eq. (61) +becomes +η > ηc = CΛ1/2. +(62) +To summarize, when η ≳ ηc (confining regime) the +size of the soliton near the maximum mass is mostly +determined by the properties of the scalar field, whereas +it strongly depends on the fermion mass when η ≲ ηc +(deconfining regime5). +C. +Energy conditions +For an energy-momentum tensor of the form +T µ +ν = diag{−ρ, P1, P2, P3}, +(63) +the energy conditions take the following form: +• Weak energy condition: ρ ≥ 0 and ρ + Pi ≥ 0 +• Strong energy condition: ρ+� +i Pi ≥ 0 and ρ+Pi ≥ 0 +• Dominant energy condition ρ ≥ |Pi|. 
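These pointwise inequalities can be monitored directly along a numerical solution. A minimal helper for a diagonal stress tensor of the form (63) is sketched below; the arrays rho and pressures are placeholders for radial profiles extracted from a solution, and the toy numbers in the last line are arbitrary.

import numpy as np

def energy_conditions(rho, pressures):
    # rho: energy density sampled on a radial grid, shape (N,)
    # pressures: principal pressures P1, P2, P3 on the same grid, shape (3, N)
    rho = np.asarray(rho, dtype=float)
    pressures = np.asarray(pressures, dtype=float)
    weak = (rho >= 0) & np.all(rho + pressures >= 0, axis=0)
    strong = (rho + pressures.sum(axis=0) >= 0) & np.all(rho + pressures >= 0, axis=0)
    dominant = np.all(rho >= np.abs(pressures), axis=0)
    return weak, strong, dominant

# toy check: an isotropic fluid with rho = 1 and P = 0.2 satisfies all three
print(energy_conditions([1.0], np.full((3, 1), 0.2)))

For the model at hand, the relevant density and principal pressures are the combinations of U, V, W, and P given next.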
+For a spherically symmetric configuration, P1 = Pr is +the radial pressure, while P2 = P3 = Pt is the tangential +pressure. For our model, +ρ = U + V + W , +(64) +Pr = V − U + P , +(65) +Pt = −U − V + P . +(66) +Since, V, W, P are nonnegative quantities, we obtain ρ + +Pr ≥ 0 and ρ+Pt ≥ 0. Thus, the weak and strong energy +conditions are satisfied if +U + V + W ≥ 0, +(67) +3P − 2U + W ≥ 0 , +(68) +respectively. Since also U is a nonnegative quantity, the +weak energy condition is always satisfied, while the strong +energy condition can be violated. +In particular, it is +violated even in the absence of fermions (P = W = 0). +5 Note that, deep in the deconfining regime (when η → 0), the +Compton wavelength of the fermion, 1/mf, might become com- +parable to or higher than the radius of the star. In this case we +expect the Thomas-Fermi approximation to break down. + +9 +5 +10 +15 +20 +25 +30 +35 +10-8 +10-6 +10-4 +10-2 +100 +10 +20 +30 +40 +10-8 +10-6 +10-4 +10-2 +100 +FIG. 2: Radial profiles of the adimensional pressure ˜P, scalar profile y and metric functions u and v for two example +configurations. Left: Λ = 0.141, η = 1.26, and ˜Pc = 0.00903. The mass and radius of the soliton fermion star are +µM/m2 +p = 6.14 and µR = 33.8, respectively. This solution falls within the confining regime. Right: Λ = 0.141, +η = 0.996, and ˜Pc = 0.0222. The mass and radius of the soliton fermion star are µM/m2 +p = 5.71 and µR = 39.3, +respectively. This solution falls within the deconfining regime. +The dominant energy condition, instead, gives two +inequalities: +U + V + W ≥ |P + V − U|, +(69) +U + V + W ≥ |P − V − U|. +(70) +One can show that the dominant energy condition is +satisfied whenever +W + 2(U + V ) ≥ P, +(71) +This inequality is satisfied if +W − P ≥ 0 , +(72) +which can be shown to be true using the analytic expres- +sions of W and P. +To sum up, the weak and dominant energy conditions +are always satisfied, while the strong energy condition can +be violated (e.g. in the absence of fermions) as generically +is the case for a scalar field with a positive potential [11]. +IV. +NUMERICAL RESULTS +In this section, we present the fermion soliton solutions +in spherical symmetry obtained by integrating the field +equations (23). We will confirm the existence of a solution +beyond the thin-wall approximation used in Ref. [13] +(example solutions are shown in Fig. 2). +Also, based +on the numerical solutions, we are able to confirm the +scalings derived in the previous sections in a certain region +of the parameter space and fix their prefactors. +A. +Numerical strategy +In this section, we summarise the numerical strategy +we adopt to find soliton fermion solutions. Given the +boundary condition (24), the set of equations (23) are +solved numerically by adopting the following strategy: +1. We fix a certain value of ˜Pc; +2. for a given value of ˜Pc and of the central scalar field +(i.e., a value of ϵ), we obtain ˜ωF, and therefore x +through the last equation in (23); +3. for fixed ˜Pc and ϵ, we integrate the first three equa- +tions in (23) for the variables (u, v, y), starting from +r ≈ 0 to the point r = Rf where the fermion pres- +sure drops to negligible values, ˜P(Rf) = 0; +4. we eliminate the fermionic quantities from the sys- +tem of equations (23) and start a new integration +with initial conditions given at r = Rf imposing +continuity of the physical quantities. 
That is, the +initial conditions on the metric and scalar field at +r = Rf are obtained from the last point of the +previous integration up to r = Rf; +5. we use a shooting method to find the value of ϵ +that allows an asymptotically-flat solution to exist, +which means imposing y(r → ∞) → 0; +6. as previously discussed, because the scalar field does +not have a compact support, we define the radius +of the star (R > Rf) as that containing 99% of the +total mass, i.e. +˜m(R) = 0.99 µM/m2 +p (Eq. (27)), +and the compactness is GM/R; + +10 +0 +10 +20 +30 +40 +0 +2 +4 +6 +8 +10 +0 +2 +4 +6 +8 +10 +0.00 +0.05 +0.10 +0.15 +0.20 +0.25 +0 +5 +10 +15 +20 +25 +30 +35 +0 +2 +4 +6 +8 +10-1 +100 +101 +10-5 +10-4 +10-3 +10-2 +10-1 +100 +101 +0 +2 +4 +6 +8 +0.00 +0.05 +0.10 +0.15 +0.20 +0.25 +FIG. 3: Mass-radius (left panels) and compactness-mass (right panels) diagrams for fermion soliton stars. The top +panels refer to various values of (Λ, η) in the confining regime (η > ηc, see Sec. III B 2). As a reference, in the top-left +panel we also draw the lines R = 2 GM, R = 9/4 GM, R = 3 GM, corresponding to the Schwarzschild radius, the +Buchdhal’s limit [21], and the photon-sphere radius. The bottom panels refer to various values of η for fixed Λ = 0.141. +The smallest value of η considered is near to but greater than the critical value. The inset shows the curves in +logarithmic scale, to highlight that in this case there exists a turning point in the M-R diagram at low masses that +proceeds towards the Newtonian limit of small M and large R. +7. Finally, we repeat the procedure for a range of values +of ˜Pc, finding a one-parameter family of solutions. +As we shall discuss, in certain regimes (including +the deconfining one) this family exists only if ˜Pc +is above a certain threshold, therefore lacking a +Newtonian limit. +As already noted, a vanishing scalar field (y = 0, ∂ry = +0) is a solution to the scalar equation in Eq. (23) only if +S = 0, that is in the absence of fermions. This ensures +that in any solution with y → 0 at infinity the fermion +pressure must vanish at some finite radius. Therefore, +the fermion soliton solution is described by a fermion +fluid confined at r ≤ Rf and endowed with a real scalar +field that is exponentially suppressed outside the star, as +expected from the discussion in Sec. III. +As described in the previous section, important param- +eters are the mass and radius of the critical solutions, +Mc and Rc. In practice, we compute these quantities by +identifying in the M-R diagram the point of maximum +mass. +B. +Fermion soliton stars +First of all, we confirm that fermion soliton stars exist +also beyond the thin-wall approximation used in Ref. [13]. +An example is shown in Fig. 2 which presents the radial +profiles for the metric, scalar field, and fermion pressure. +Inspecting both panels of Fig. 2 can help us understand +the qualitative difference between solutions in the con- +fining regime (left) and the deconfining one (right). In +the first case, as soon as the scalar field moves away from +its central value at ρ → 0, and the effective mass of the +fermion field grows, the pressure quickly drops to zero. +This reflects in the fact that the macroscopic size of the +star R is found to be very close to where the scalar field + +11 +100 +101 +102 +10-2 +10-1 +100 +101 +102 +100 +101 +102 +10-3 +10-2 +10-1 +100 +101 +102 +FIG. 4: Left: Behaviour of the critical radius Rc with Λ and η. The scaling (62) is highlighted by the diagonal black +dashed line. 
We observe an agreement until Λ ≲ 0.3 whereas, for larger Λ, ηc increasingly exceeds the predicted value. +The horizontal grid-line highlights when the µR > 1 regime ends. The shaded region above the two dashed lines is the +confining regime. Right: Behaviour of the critical radius Mc with Λ and η. We observe that the critical mass does +not exhibit a significant change of behaviour for η < ηc. +starts moving away from the false vacuum. This is the +reason why the macroscopic properties of the star are +mainly dictated by the scalar field potential. In the latter +case, the small bare mass of fermions makes them remain +ultra-relativistic even when the scalar field moves away +from the false vacuum, generating a layer where fermionic +pressure drops exponentially but remains finite. After the +energy of fermions has fallen within the non-relativistic +regime, fermionic pressure rapidly vanishes. The exis- +tence of such a layer makes the final mass and radius of +the star dependent on the fermion mass, see more details +below. Also, as the numerical shooting procedure requires +matching the asymptotic behavior of the scalar field out- +side the region where the energy density of the fermions +remains sizeable, deconfining solutions are characterized +by a larger tuning of the parameter controlling the central +displacement ϵ. +In Fig. 3 we present the mass-radius and compactness- +mass diagrams for various values of Λ and η, in the confin- +ing regime. In the top panels, we observe that Λ strongly +affects the mass-radius scale and the maximum mass, +while from the bottom panels we observe that η has a +weaker impact on the maximum mass, as expected from +the discussion in Sec. III. +The dependence of Mc and Rc on Λ and η is presented +in Fig. 4. As expected, we observe that, for a fixed Λ, +there is a critical value of η, below which the radius begins +to grow rapidly. For η > ηc and Λ ≲ 0.5, we observe that +the predictions given in Sec. III are valid, confirming the +existence of a confining regime. Indeed, in that region +of the parameter space, both the mass and radius have +a little dependence on η. This dependence grows very +slowly for increasing value of η, in agreement with Eq. (58). +Moreover, the value of ηc scales, for Λ ≲ 0.3, in agreement +with Eq. (62), while for larger values of Λ it exceeds the +analytical scaling. At variance with the critical radius, +the critical mass does not exhibit a change of behaviour +for η < ηc. As a consequence, the compactness decreases +quickly. +Finally, in Table III we report the scaling coefficients +computed numerically, which are valid in the confining +regime (η ≳ ηc, Λ ≲ 0.5). +C. +On the existence of a Newtonian regime +From the bottom panels of Fig. 3, we observe that, even +though η has a weak impact on the maximum mass, it +can qualitatively change the M −R diagram, especially at +low masses. Overall, the mass-radius diagram reassembles +that of solitonic boson stars [15–18] with several turning +points in both the mass and the radius, giving rise to +multiple branches (see also [22]). The main branch is +the one with M ′(R) > 0 before the maximum mass, +which is qualitatively similar to that of strange (quark) +stars [23, 24]. However, the low-mass behavior (and the +existence of a Newtonian regime) depends strongly on η. 
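Since the critical quantities are read off as the maximum-mass point of the computed one-parameter family, and the branches are delimited by the turning points of the same sequence, a short post-processing sketch may help. The snippet below is ours and purely illustrative: the arrays are placeholders standing in for a numerically obtained sequence labelled by the central pressure (they are not output of the actual integrations), expressed in the dimensionless units µM/m_p^2 and µR used in the text.

```python
import numpy as np

# Placeholder one-parameter family labelled by the central pressure P_c
# (illustrative numbers only, in units of mu*M/m_p^2 and mu*R).
Pc = np.array([0.003, 0.006, 0.009, 0.015, 0.022, 0.035, 0.060])
M  = np.array([4.8,   5.6,   6.1,   6.3,   6.2,   5.9,   5.4])    # mu*M/m_p^2
R  = np.array([46.0,  38.0,  30.0,  23.0,  21.0,  20.0,  19.0])   # mu*R

# Critical solution: the maximum-mass point of the sequence.
ic = int(np.argmax(M))
Mc, Rc = M[ic], R[ic]
Cc = Mc / Rc   # equals G*Mc/Rc, since G*M/R = (mu*M/m_p^2)/(mu*R)
print(f"critical point: mu*Mc/m_p^2 = {Mc}, mu*Rc = {Rc}, Pc = {Pc[ic]}, Cc = {Cc:.2f}")

# Turning points of the M-R diagram: sign changes of dM/dR along the sequence.
slope = np.diff(M) / np.diff(R)
turning = np.where(np.sign(slope[1:]) != np.sign(slope[:-1]))[0] + 1
print("turning point(s) between consecutive solutions at indices:", turning)
```

Applied to the actual sequences, this is all that is needed to extract Mc, Rc, and the compactness Cc, and to separate the branches discussed here.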
+For sufficiently large values of η (always in the confining +regime) there exists a low-compactness branch in which +M ′(R) < 0 and where the fermionic pressure is small +compared to the energy density, giving rise to a Newtonian +regime. However, an interesting effect start occurring for +values of η near to, but greater than, the critical one (e.g., +the blue curve for η = 1.26 in the bottom panels of Fig. 36) +6 Notice that, in the bottom left panel, it is not possible to see the +complete tail of the M-R diagram. As underlined in the text, +in the center right panel of Fig. 5 we plot the complete M-R +diagram. + +12 +TABLE III: Various scaling of the critical parameters with +coefficients derived numerically in the Λ ≲ 0.5 range. +Critical mass +µMc/m2 +p ≈ 0.19/Λ2 +Critical radius +µRc ≈ 0.71/Λ2 +Compactness of the critical solution +Cc ≈ 0.27 +Critical value of the scale ratio +ηc ≈ 2.7 Λ1/2 +all the way down to the deconfining regime. In this case, +there is still a lower turning point in the M-R diagram, +but the compactness eventually starts growing (see right +bottom panel). In this case there is no Newtonian regime, +since the compactness is never arbitrarily small. +This peculiar behavior is also related to another im- +portant feature of the model, namely the fact that, for η +sufficiently small, fermion soliton stars exist only above +a minimum threshold for the central fermionic pressure. +We clarify this point in Fig. 5. In the left panels we +show the mass of the star as a function of the central +fermionic pressure for Λ = 0.141 and three values of η. +For η = 0.966 and η = 1.26 (top and center panels), the +pressure has a lower bound, corresponding to the absence +of a Newtonian limit. For η = 2.92 (bottom panels) the +behavior is qualitatively different and in this case the +Newtonian regime is approached as Pc → 0. +To clarify where the minimum pressure and these mul- +tiple branches are in the mass-radius diagram, in the right +panels of Fig. 5, we show data points for M − R using +the same color scheme as in the corresponding left panels. +Interestingly, the minimum pressure does not correspond +to the minimum mass in Fig. 5, but it is an intermediate +point in the M − R diagram. In the center right panel we +show an extended version of the Λ = 0.141, η = 1.26 curve +shown in Fig. 3. This highlights the peculiar behavior of +the new branch, which has a further turning point at large +radii. Studying the stability of these different peculiar +branches [22] is left for future work. +Finally, note that in both cases there are values of +the central fermionic pressure corresponding to multiple +solutions, each one identified by a different central value +of the scalar field. +V. +PARAMETER SPACE AND +ASTROPHYSICAL IMPLICATIONS +Given the number of parameters of our model, it is +interesting to study the characteristic mass and radius of +fermion soliton stars in this theory. By defining +q ≡ (µφ2 +0)1/3, +(73) +as long as we are in the confining regime, one finds +Mc ∼ 0.19 +8π +m4 +p +q3 ∼ 1.27 M⊙ +� +q +5 × 105 GeV +�−3 +, +(74) +Rc ∼ 0.71 +8π +m2 +p +q3 ∼ 6.5 km +� +q +5 × 105 GeV +�−3 +, +(75) +where we included the prefactors obtained using the nu- +merical results. 
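As a quick numerical cross-check of these scalings (a sketch of ours that uses only standard natural-unit conversion constants; the function name and the rounding of the conversion factors are not from the paper), Eqs. (74)–(75) can be evaluated for a given q:

```python
import math

# Natural-unit conversion factors (standard values, rounded).
M_PLANCK_GEV = 1.221e19            # Planck mass m_p in GeV
GEV_INV_TO_KM = 1.9733e-19         # 1 GeV^-1 in km (hbar*c ~ 0.19733 GeV*fm)
GEV_TO_MSUN = 1.0 / 1.116e57       # 1 GeV in solar masses (M_sun ~ 1.116e57 GeV)

def critical_mass_radius(q_gev):
    """Confining-regime scalings of Eqs. (74)-(75), with the numerically
    determined prefactors 0.19 and 0.71; q = (mu*phi0^2)^(1/3) in GeV."""
    mc = 0.19 / (8 * math.pi) * M_PLANCK_GEV**4 / q_gev**3 * GEV_TO_MSUN
    rc = 0.71 / (8 * math.pi) * M_PLANCK_GEV**2 / q_gev**3 * GEV_INV_TO_KM
    return mc, rc

Mc, Rc = critical_mass_radius(5e5)       # q = q_astro = 5e5 GeV
print(f"q = 5e5 GeV: Mc ~ {Mc:.2f} Msun, Rc ~ {Rc:.1f} km")     # roughly 1.2 Msun, 6.6 km

Mc13, Rc13 = critical_mass_radius(30.0)  # q = 30 GeV, the value considered in Ref. [13]
print(f"q = 30 GeV:  Mc ~ {Mc13:.1e} Msun, Rc ~ {Rc13:.1e} km")  # supermassive scale
```

For q = 5 × 10^5 GeV this returns about 1.2 M⊙ and 6.6 km, consistent with the benchmark values quoted above up to the rounding of the prefactors, while smaller q (e.g. the q = 30 GeV case considered below) lands in the supermassive regime.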
Given the cubic dependence on q, the +model can accommodate compact objects of vastly dif- +ferent mass scales, while the compactness at the maxi- +mum mass is independent of q, GMc/Rc ∼ 0.27, which +is slightly larger than that of a typical neutron star, but +still smaller than the compactness of the photon sphere. +As a consequence, one expects fermion soliton stars to +display a phenomenology more akin to ordinary neutron +stars than to black holes [14]. The authors of Ref. [13] +considered the value q = 30 GeV, yielding supermassive +objects with Mc ∼ 1012 M⊙ and Rc ∼ 1013 km ∼ 0.3 pc. +Instead, the choice +q = qastro ∼ 5 × 105 GeV +(76) +leads to the existence of soliton solutions of mass and +radius comparable to ordinary neutron stars. +Furthermore, the fact that the model is in the confining +regime only above a critical value of η, Eq. (62), implies +(using Eq. (22) and our numerical results): +mf > 2.7 +�√ +8πq3 +mp +�1/2 +∼ 0.6 GeV +� +q +qastro +�3/2 +, +(77) +a range including the neutron mass. +Therefore, the +fermion gas can be a standard degenerate gas of neutrons. +It is also interesting to combine the above inequality (sat- +urated when mf = mc +f) with Eq. (74), finding a relation +between the maximum mass of the soliton in the confining +regime and the critical fermion mass, +Mc ∼ 0.46 +� +GeV +mc +f +�2 +M⊙ , +(78) +independently of q. Interestingly, this model allows for +subsolar compact objects for fermions at (or slightly heav- +ier than) the GeV scale, whereas it allows for supermassive +(Mc ∼ 106M⊙) compact stars for a degenerate gas of elec- +trons (mc +f ∼ 0.5 MeV). +Clearly, the same value of q can be obtained with dif- +ferent combinations of µ and φ0. In general, +µ = 500 +� +q +qastro +�3 �500 TeV +φ0 +�2 +TeV +(79) += 500 +� +mc +f +0.6 GeV +�2 �500 TeV +φ0 +�2 +TeV , +(80) +so µ ∼ GeV for q = qastro (or, equivalently, for mc +f = +0.6 GeV) and φ0 ∼ 3 × 105 TeV. Note that the latter +value is still much smaller than the Planck scale, so the +condition Λ ≪ 1 is satisfied. From our numerical results, +Eqs. (74)–(75) are valid as long as Λ ≲ 0.5 whereas, for +larger values of Λ, Mc, Rc, and Cc decrease rapidly and + +13 +10-2 +10-1 +100 +101 +102 +103 +4 +5 +6 +7 +8 +9 +10 +0.02 +0.03 +0.04 +0.05 +4 +5 +6 +7 +8 +9 +10 +20 +40 +60 +80 +100 +120 +140 +4 +5 +6 +7 +8 +9 +10 +10-2 +10-1 +100 +101 +102 +0 +2 +4 +6 +8 +10 +20 +40 +60 +80 +100 +120 +0 +2 +4 +6 +8 +10 +10-4 +10-3 +10-2 +10-1 +100 +10-2 +10-1 +100 +101 +0 +5 +10 +15 +20 +25 +30 +35 +0 +2 +4 +6 +8 +10 +0 +10 +20 +30 +40 +10-2 +10-1 +100 +101 +FIG. 5: Left panels: The mass of fermion soliton stars as a function of the central fermionic pressure. Right panels: +The corresponding mass-radius diagram using the same color scheme as in the left panels, in order to associate to each +point the corresponding central pressure. Top: Λ = 0.141 and η = 0.996. This solution is in the deconfining regime +and there is a lower bound on ˜Pc below which no solution exists. Center: Λ = 0.141 and η = 1.26. This solution is in +the confining regime but, also in this case, there exists a lower bound on ˜Pc. Bottom: Λ = 0.141 and η = 2.92. This +solution is in the confining regime but, given the larger value of η, there is no lower bound on ˜Pc and a Newtonian +regime exists. In all three cases, for a certain range of ˜Pc there are multiple solutions with the same central fermionic +pressure and different central value of the scalar field. +the condition µR ≫ 1 might not hold (see Fig. 4). 
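Since the same q can be realized by different (µ, φ0) pairs, a minimal sketch (ours, using the illustrative numbers quoted in the text) makes the tradeoff explicit by inverting q = (µφ0^2)^(1/3) and evaluating Λ = √(8π) φ0/mp, so one can check that a chosen pair stays in the Λ ≲ 0.5 window where Eqs. (74)–(75) apply:

```python
import math

M_PLANCK_GEV = 1.221e19  # Planck mass m_p in GeV

def mu_and_lambda(q_gev, phi0_gev):
    """Invert q = (mu*phi0^2)^(1/3) for the scalar mass mu and
    evaluate Lambda = sqrt(8*pi)*phi0/m_p (all inputs in GeV)."""
    mu = q_gev**3 / phi0_gev**2
    lam = math.sqrt(8 * math.pi) * phi0_gev / M_PLANCK_GEV
    return mu, lam

# q = q_astro = 5e5 GeV with phi0 = 3e5 TeV = 3e8 GeV: mu of order a GeV, Lambda << 1.
print(mu_and_lambda(5e5, 3e8))
# The same q with phi0 = 500 TeV instead pushes mu up to ~500 TeV (cf. Eq. (79)).
print(mu_and_lambda(5e5, 5e5))
```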
The validity condition Λ ≲ 0.5 therefore gives an upper bound on φ0,

φ0 ≲ 0.5 mp/√(8π) ∼ 10^18 GeV ,    (81)

which, using Eq. (79), can be translated into a lower bound on µ,

µ ≳ 8.4 × 10^-11 (q/qastro)^3 eV .    (82)

Thus, the scalar-field mass can also vary over many orders of magnitude depending on the value of q, reaching a lower limit that can naturally lie in the ultralight regime.

Finally, in the deconfining regime there is no minimum fermion mass, so solutions can exist also beyond the range dictated by Eq. (77); fermion soliton stars in such a regime would, however, be characterized by smaller values of the compactness (see the discussion in Sec. IV).

VI. CONCLUSIONS

We have found that fermion soliton stars exist as static solutions to Einstein-Klein-Gordon theory with a scalar potential and a Yukawa coupling to a fermion field. This confirms the results of Ref. [13], obtained in the thin-wall approximation, and provides a way to circumvent the no-go theorems [10, 11] for solitons built from a single real scalar field.

Focusing on spherical symmetry, we have explored the full parameter space of the model and derived both analytical and numerical scalings for the relevant quantities, such as the critical mass and radius of a fermion soliton star. Interestingly, the model predicts the existence of solutions in the subsolar/solar (resp. supermassive) range for a standard gas of degenerate neutrons (resp. electrons).

We also unveiled the existence of a confining and a deconfining regime, where the macroscopic properties of the soliton are mostly governed by the scalar-field parameters or by the fermion mass, respectively, and the fact that no Newtonian analog exists for these solutions when the fermion mass lies below a certain threshold.

Extensions of our work are manifold. First of all, for simplicity, we have focused on a scalar-fermion coupling tuned to provide an almost vanishing effective fermion mass in the stellar core. This assumption imposes f = mf/φ0, a condition that can be relaxed, thus increasing the dimensionality of the parameter space. We have also considered a scalar potential with two degenerate minima. A straightforward generalization is to break this degeneracy and allow for a true false-vacuum potential, in which the scalar field transits from the false-vacuum state inside the star to the true-vacuum state at infinity. From the point of view of the fundamental theory, it would be interesting to investigate an embedding within the Standard Model and beyond, also including gauge fields (see, e.g., Ref. [25] for a recent attempt along this direction).

Finally, although we focused on static and spherically symmetric solutions, there is no fundamental obstacle to considering spinning configurations and the dynamical regime, both of which would be relevant to study the phenomenology of fermion soliton stars, along the lines of what has been widely studied for boson stars [9] and for mixed fermion-boson stars [26]. In particular, due to the existence of multiple branches [22] and the absence of a Newtonian limit in certain cases, an interesting study concerns the radial linear stability of these solutions. We hope to address these points in future work.

ACKNOWLEDGMENTS

We thank Enrico Barausse, Mateja Bošković, and Massimo Vaglio for useful conversations. G.F. and P.P. acknowledge financial support provided under the European Union's H2020 ERC, Starting Grant agreement no.
DarkGRA–757480 and under the MIUR PRIN pro- +gramme, and support from the Amaldi Research Cen- +ter funded by the MIUR program “Dipartimento di Ec- +cellenza" (CUP: B81I18001170001). +The research of +A.U. was supported in part by the MIUR under con- +tract 2017 FMJFMW (“New Avenues in Strong Dynam- +ics,” PRIN 2017). This work was supported by the EU +Horizon 2020 Research and Innovation Programme un- +der the Marie Sklodowska-Curie Grant Agreement No. +101007855. +Appendix A: Connection with scalar-tensor theories +In this appendix we discuss whether the model for +fermion soliton stars presented in the main text can also +arise in the context of a scalar-tensor theory of gravity +(see, e.g., [27] for a review on modified theories of gravity). +In the so-called Jordan frame7, where gravity is mini- +mally coupled to matter fields, scalar-tensor theories are +described by the action (see for example [28]) +ˆS = +� +d4x +√−ˆg +16πG +� +F(ˆφ) ˆR − Z(ˆφ)ˆgµν∂µ ˆφ∂ν ˆφ − ˆU(ˆφ) +� ++ +ˆSm( ˆψm; ˆgµν) . +(A1) +The coupling functions F and Z single out a particular +theory within the class. For example, Brans-Dicke theory +corresponds to F = ˆφ and Z = ω0/ˆφ, where ω0 is a +constant. +We can write the theory in an equivalent form in the +so-called Einstein frame, where gravity is minimally cou- +pled to the scalar field. For this purpose, we perform a +conformal transformation of the metric, ˆgµν = A2(φ)gµν +with A(φ) = F −1/2(ˆφ), a field redefinition, φ = φ(ˆφ), and +a conformal rescaling of the matter field, ˆψm → ψm. The +scalar field φ is now minimally coupled to gµν whereas ψm +is minimally coupled to ˆgµν [28]. The energy-momentum +tensor is Tµν = A2(φ) ˆTµν whereas the scalar potential +becomes U(φ) = +ˆU( ˆφ) +16πGF 2( ˆφ) +7 In this appendix we used a hat to denote quantities in the Jordan +frame, whereas quantities without the hat refer to the Einstein +frame where gravity is minimally coupled to the scalar field. + +15 +The scalar field equation in the Einstein frame reads +□φ = −T d log A(φ) +dφ ++ ∂U +∂φ . +(A2) +Since in our theory (1) the scalar field is minimally +coupled to gravity, it is natural to interpret it in the +context of the Einstein frame. Thus, we can compare +Eq. (A2) to the second equation in (11): +□φ = −fS + ∂U +∂φ , +(A3) +which, using Eq. (8), can be written as +□φ = +f +(mf − fφ)T + ∂U +∂φ . +(A4) +Therefore, if we identify +d log A(φ) +dφ += +−f +(mf − fφ) = +1 +φ − φ0 +, +(A5) +the scalar equation of our model is the same as in a +scalar-tensor theory with coupling A(φ) in the Einstein +frame. Integrating this equation yields (henceforth as- +sumig A(0) = 1), +A(φ) = 1 − φ +φ0 += meff +mf +. +(A6) +Interestingly, the matter coupling vanishes when φ ≈ φ0. +It is left to be checked if the gravitational sector of our +model is equivalent to that of a scalar-tensor theory with +A(φ) given by Eq. (A6). Let us consider a degenerate gas +of noninteracting fermions with mass mf in the Jordan +frame, with energy-momentum +ˆT µν = ( ˆW + ˆP)ˆuµˆvν + ˆgµν ˆP +(A7) +where, assuming spherical symmetry, +ˆW(ˆρ) = +2 +(2π)3 +� ˆkF (ˆρ) +0 +d3k +� +k2 + m2 +f +ˆP(ˆρ) = +2 +(2π)3 +� ˆkF (ˆρ) +0 +d3k +k2 +3 +� +k2 + m2 +f +. +(A8) +In spherical symmetry, since the spacetime has the same +form as in Eq. (13), it is straightforward to minimize the +energy of the fermion gas at fixed number of fermions +(the calculation is exactly the same as the one done to +obtain Eq. (18)): +ˆk2 +F = ˆω2 +F e−2ˆu − m2 +f . 
+(A9) +It is important to notice that in the standard scalar- +tensor theory in the Jordan frame there is no Yukawa +interaction, therefore the fermion particles do not acquire +any effective mass. +In the Einstein frame, Eq. (A7) simply reads +T µν = (W + P)uµuν + gµνP , +(A10) +where W = A4(φ) ˆW and P = A4(φ) ˆP. Therefore, also +in the Einstein frame we have a perfect fluid in the form +of a zero-temperature Fermi gas. Let us now compute +the expressions of W and P explicitly. First of all, from +Eq. (A8), following the same computation presented in +the main text, we get +ˆW = +m4 +f +8π2 +� +ˆx +� +1 + ˆx2(1 + 2ˆx2) − log +� +ˆx + +� +ˆx2 + 1 +�� +ˆP = +m4 +f +8π2 +� +ˆx +�2 +3 ˆx2 − 1 +�� +1 + ˆx2 + log +� +ˆx + +� +ˆx2 + 1 +�� +(A11) +where ˆx = ˆkF /mf. Since A(φ) = meff/mf, we obtain +W = m4 +eff +8π2 +� +ˆx +� +1 + ˆx2(1 + 2ˆx2) − log +� +ˆx + +� +ˆx2 + 1 +�� +P = m4 +eff +8π2 +� +ˆx +�2 +3 ˆx2 − 1 +�� +1 + ˆx2 + log +� +ˆx + +� +ˆx2 + 1 +�� +. +(A12) +Note that W(ˆx) and P(ˆx) above implicitly define an equa- +tion of state that is exactly equivalent to that obtained +from W and P in Eqs. (21a)– (21b). This shows that +our model can be interpreted as a scalar-tensor theory +in the Einstein frame with coupling to matter given by8 +A(φ) = meff/mf. +Furthermore, note that the dimensionless quantity ˆx = +ˆkF /mf = kF /meff = x is invariant under a change from +the Jordan to the Einstein frame. Therefore, W and P +are exactly those given in Eqs. (21a)–(21b). +Finally, ˆS in the Jordan frame reads +ˆS = +2 +(2π)3 +� ˆkF +0 +d3k +mf +� +k2 + m2 +f += +m3 +f +2π2 +� +ˆx +� +1 + ˆx2 − log +� +ˆx + +� +ˆx2 + 1 +�� +(A13) +while in the Einstein frame9 +S = A3 ˆS = m3 +eff +2π2 +� +x +� +1 + x2 − log +� +x + +� +x2 + 1 +�� +, +(A14) +8 Note that our model and the scalar-tensor theory are not exactly +equivalent to each other. Indeed, while in the scalar-tensor theory +any matter field is universally coupled to A(φ)ˆgµν, in our model +this is the case only for the fermion gas, while any other matter +field is minimally coupled to the metric, in agreement with the +fact that our model is based on standard Einstein’s gravity. +9 The fact that S = A3 ˆS can be derived from the condition +A4(φ) ˆT = T ⇒ A4(φ)mf ˆS = meffS. + +16 +since ˆx = x. Thus, also in this case we obtain the same +expression as in Eq. (21c). +Having assessed that our model can be interpreted in +the context of a scalar-tensor theory, it is interesting to +study the latter in the Jordan frame. In particular, since +A(φ) = +1 +� +F(ˆφ) +, +(A15) +and A(φ) = 1 − φ/φ0, the coupling function F(ˆφ) is +singular in ˆφ(φ0). In the language of the scalar-tensor +theory, we see that in the core of a fermion soliton star, +where φ ≈ φ0 and matter is almost decoupled in the +Einstein frame, the scalar field in the Jordan frame is +strongly coupled to gravity. +[1] J. A. Wheeler, Phys. Rev. 97, 511 (1955). +[2] C. A. R. Herdeiro and E. Radu, Int. J. Mod. Phys. D 24, +1542014 (2015), arXiv:1504.08209 [gr-qc]. +[3] D. J. Kaup, Phys. Rev. 172, 1331 (1968). +[4] R. Ruffini and S. Bonazzola, Phys. Rev. 187, 1767 (1969). +[5] M. Colpi, S. L. Shapiro, and I. Wasserman, Phys. Rev. +Lett. 57, 2485 (1986). +[6] S. R. Coleman, Nucl. Phys. B 262, 263 (1985), [Adden- +dum: Nucl.Phys.B 269, 744 (1986)]. +[7] P. Jetzer, Phys. Rept. 220, 163 (1992). +[8] F. E. Schunck and E. W. Mielke, Class. Quant. Grav. 20, +R301 (2003), arXiv:0801.0307 [astro-ph]. +[9] S. L. Liebling and C. Palenzuela, Living Rev. Rel. 15, 6 +(2012), arXiv:1202.5809 [gr-qc]. 
[10] G. H. Derrick, J. Math. Phys. 5, 1252 (1964).
[11] C. A. R. Herdeiro and J. M. S. Oliveira, Class. Quant. Grav. 36, 105015 (2019), arXiv:1902.07721 [gr-qc].
[12] E. Seidel and W. M. Suen, Phys. Rev. Lett. 66, 1659 (1991).
[13] T. D. Lee and Y. Pang, Phys. Rev. D 35, 3678 (1987).
[14] V. Cardoso and P. Pani, Living Rev. Rel. 22, 4 (2019), arXiv:1904.05363 [gr-qc].
[15] R. Friedberg, T. D. Lee, and Y. Pang, Phys. Rev. D 35, 3658 (1987).
[16] C. Palenzuela, P. Pani, M. Bezares, V. Cardoso, L. Lehner, and S. Liebling, Phys. Rev. D 96, 104058 (2017), arXiv:1710.09432 [gr-qc].
[17] M. Bezares, M. Bošković, S. Liebling, C. Palenzuela, P. Pani, and E. Barausse, Phys. Rev. D 105, 064067 (2022), arXiv:2201.06113 [gr-qc].
[18] M. Bošković and E. Barausse, JCAP 02, 032 (2022), arXiv:2111.03870 [gr-qc].
[19] S. L. Shapiro and S. A. Teukolsky, Black Holes, White Dwarfs, and Neutron Stars: The Physics of Compact Objects (1983).
[20] T. D. Lee and Y. Pang, Phys. Rept. 221, 251 (1992).
[21] H. A. Buchdahl, Phys. Rev. 116, 1027 (1959).
[22] D. Guerra, C. F. B. Macedo, and P. Pani, JCAP 09, 061 (2019), [Erratum: JCAP 06, E01 (2020)], arXiv:1909.05515 [gr-qc].
[23] C. Alcock, E. Farhi, and A. Olinto, Astrophys. J. 310, 261 (1986).
[24] A. Urbano and H. Veermäe, JCAP 04, 011 (2019), arXiv:1810.07137 [gr-qc].
[25] Y. Endo, H. Ishihara, and T. Ogawa, Phys. Rev. D 105, 104041 (2022), arXiv:2203.09709 [hep-th].
[26] S. Valdez-Alvarado, C. Palenzuela, D. Alic, and L. A. Ureña-López, Phys. Rev. D 87, 084040 (2013), arXiv:1210.2299 [gr-qc].
[27] E. Berti et al., Class. Quant. Grav. 32, 243001 (2015), arXiv:1501.07274 [gr-qc].
[28] T. P. Sotiriou and V. Faraoni, Rev. Mod. Phys. 82, 451 (2010), arXiv:0805.1726 [gr-qc].
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' We systematically study this novel family of solutions and present their mass-radius diagram and maximum compactness, which is close to (but smaller than) that of the corresponding Schwarzschild photon sphere.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Finally, we discuss the ranges of the parameters of the fundamental theory in which the latter might have interesting astrophysical implications, including compact (sub)solar and supermassive fermion soliton stars for a standard gas of degenerate neutrons and electrons, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' CONTENTS I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Introduction 1 II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Setup 2 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Thomas-Fermi approximation 2 B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Dimensionless equations of motion and boundary conditions 3 III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Some preliminary theoretical considerations 5 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' On the crucial role of fermions for the existence of solitonic stars 5 B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Scaling of the physical quantities in the µR ≫ 1 regime 6 C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Energy conditions 8 IV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Numerical results 9 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Numerical strategy 9 B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Fermion soliton stars 10 C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' On the existence of a Newtonian regime 11 V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Parameter space and astrophysical implications 12 VI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Conclusions 14 Acknowledgments 14 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Connection with scalar-tensor theories 14 References 16 I.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' INTRODUCTION Solitonic solutions play a crucial role in many field theories, in particular in General Relativity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' In the con- text of the latter, starting from Wheeler’s influential idea of geons [1], considerable attention has been devoted to find minimal models allowing for self-gravitating soli- tonic solutions [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' The prototypical example is that of boson stars [3–5] (and of their Newtonian analog, Q- balls [6]), which are self-gravitating solutions to Einstein- Klein-Gordon theory with a complex and massive scalar field (see [7–9] for some reviews).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' If the scalar field is real, no-go theorems prevent the existence of solitonic solutions for very generic classes of scalar potential [10, 11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Indeed, Einstein-Klein-Gordon theory contains time-dependent solutions known as oscillatons which, however, decay in time [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' About forty years ago, Lee and Pang proposed a model in which a real scalar field with a false-vacuum potential is coupled to a massive fermion via a Yukawa term [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Working in a thin-wall limit in which the scalar field is a step function, for certain parameters of the model they obtained approximated solutions describing fermion soliton stars.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' The scope of this paper is twofold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' On the one hand we show that fermion soliton stars exist in this model also beyond the thin-wall approximation, and we build exact static solutions within General Relativity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' On the other hand, we elucidate some key properties of the model, in particular the role of the effective fermion mass provided by the Yukawa coupling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Then, we explore the model systematically, presenting mass-radius diagrams and the maximum compactness of fermion soliton stars for various choices of the parameters, showing that in this model a standard gas of degenerate neutrons (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' electrons) can support stable (sub)solar (resp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' supermassive) fermion soliton stars with compactness comparable to that of ordinary neutron stars.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' This analysis paves the way for a detailed study of the phenomenology of fermion soliton stars as a motivated model of exotic compact objects [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Finally, in Appendix A, we explore the connection of the model to a very peculiar scalar-tensor theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' We use the signature (−, +, +, +) for the metric, adopt natural units (ℏ = c = 1) and define the Planck mass through G = m−2 p .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content='08709v1 [gr-qc] 20 Jan 2023 2 II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' SETUP We consider a theory in which Einstein gravity is mini- mally coupled to a real scalar field φ and a fermion field ψ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' The action can be written as [13] S = � d4x√−g � R 16πG − 1 2∂µφ∂µφ − U(φ) + ¯ψ(iγµDµ − mf)ψ + fφ ¯ψψ � , (1) where the scalar potential is U(φ) = 1 2µ2φ2� 1 − φ φ0 �2 , (2) and features two degenerate minima at φ = 0 and φ = φ0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' The constant µ (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' mf) is the mass of the scalar (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' fermion).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' The Yukawa interaction is controlled by the coupling f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' It should be noted that Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (1) describes the action of a local field theory and, therefore, we expect that all physics derived from it will naturally respect causality conditions (that, on the contrary, could be violated in the absence of such underlying formulation).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Also, we point out that the matter Lagrangian in Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (1) describes a renormalizable field theory;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' this is in contrast to the widely used model describing solitonic boson stars [15–18] in which the scalar potential is non-renormalizable and field values should not exceed the limit of validity of the corresponding effective field theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' The covariant deriva- tive Dµ in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (1) takes into account the spin connection of the fermionic field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' From the quadratic terms in the fermion Lagrangian, it is useful to define an effective mass, meff = mf − fφ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (3) We will focus on scenarios in which the fermion becomes effectively massless (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' meff = 0) when the scalar field sits on the second degenerate vacuum, φ = φ0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' This condition implies fixing f = mf φ0 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (4) As we shall discuss, we are mostly interested in configu- rations where the scalar field makes a transition between the false1 vacuum (φ ≈ φ0) to the true vacuum (φ ≈ 0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Thomas-Fermi approximation The description of a fermionic field in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (1) requires treating the quantization of spin-1/2 particles in curved 1 Although the minima at φ = 0 and φ = φ0 are degenerate, we shall call them true and false vacuum, respectively, having in mind the generalization in which the potential U(φ) can be nondegenerate, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' U(φ0) ̸= U(0), see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' 1 below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' spacetime.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' The problem can be simplified significantly within the Thomas-Fermi approximation2, relying on the following assumptions: i) the gravitational and scalar fields are slowly varying functions with respect to the fermion dynamics, they do not interact directly with the (microscopic) fermionic field ψ, but with average macroscopic quantities (mean-field approximation);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' ii) the fermion gas is at equilibrium so that all the macroscopic quantities are time independent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' In practice, one can divide the entire three-space into small domains which are much larger than the de Broglie wavelength of the typical fermion, but sufficiently small that the gravitational and scalar fields are approximately constant inside each domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Then, every domain is filled with a degenerate (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' the temperature is much smaller than the chemical potential) Fermi gas, in such a way that the Fermi distribution is approximated by a step function, nk = θ(kF − k), where kF(xµ) is the Fermi momentum observed in the appropriate local frame.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' The energy density of the fermion gas reads W = 2 (2π)3 � kF 0 d3k ϵk, (5) where ϵk = � k2 + m2 eff.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Notice that W = W(xµ) through the spacetime dependence of kF.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' In an analogous way, we obtain the fermion gas pressure P and the scalar density S = ⟨ ¯ψψ⟩ as P = 2 (2π)3 � kF 0 d3k k2 3ϵk , (6) S = 2 (2π)3 � kF 0 d3k meff ϵk .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (7) It it easy to show that these quantities satisfy the identity W − 3P = meffS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (8) In the Thomas-Fermi approximation, the fermions enter Einstein’s equations as a perfect fluid characterised by an energy-momentum tensor of the form T [f] µν = (W + P)uµuν + Pgµν, (9) while they also enter the scalar field equation through the scalar density S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Indeed, by varying the action in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (1) with respect to φ, we obtain a source term of the form ≈ f ¯ψψ.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Within the Thomas-Fermi approximation, this becomes f ¯ψψ → f⟨ ¯ψψ⟩ ≡ fS, (10) which is consistent with the fact that, in the fluid descrip- tion, the scalar field equation couples to fermions through a term proportional to the trace (T [f])µ µ = −W + 3P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' 2 We point the interested reader to Appendix A of Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' [13] for a complete derivation of the Thomas-Fermi approximation in curved spacetime, while here we summarise the main properties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' 3 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Equations of motion It is now possible to write down the equations of motion for our theory in covariant form Gµν = 8πG Tµν, □φ − ∂U ∂φ + fS = 0, (11) where Tµν = −2 � ∂Lφ ∂gµν − 1 2gµνLφ � + T [f] µν , (12) in which Lφ is the Lagrangian density of the scalar field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' In order to close the system, we need an equation de- scribing the behaviour of kF.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' This is obtained by mini- mizing the energy of the fermion gas at fixed number of fermions [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' From now on, for simplicity, we will consider spherically symmetric equilibrium configurations, whose background metric can be expressed as ds2 = −e2u(ρ)dt2 +e2v(ρ)dρ2 +ρ2(dθ2 +sin2 θdϕ2), (13) in terms of two real metric functions u(ρ) and v(ρ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Fur- thermore, we will assume that the scalar field in its equilib- rium configuration is also static and spherically symmetric, φ(t, ρ, θ, ϕ) = φ(ρ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Being the spacetime static and spher- ically symmetric, kF = kF(ρ) can only be a function of the radial coordinate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Fermi momentum equation In the Thomas-Fermi approximation the fermion gas energy can be written as [13] Ef = 4π � dρ ρ2 eu(ρ)+v(ρ) W, (14) while the number of fermions is N = 4 3π � dρ ρ2ev(ρ)kF(ρ).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (15) To enforce a constant number of fermions, we introduce the Lagrangian multiplier ωF and define the functional E′ f[kF] = Ef[kF] − ωF � N[kF] − Nfixed � , (16) which is minimized by imposing the condition δE′ f[kF] δkF(ρ) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (17) One directly obtains ϵF = e−uωF, where ϵF = ϵkF is the Fermi energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Thus, ωF coincides with the Fermi energy in flat spacetime while it acquires a redshift factor otherwise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Finally, we find k2 F(ρ) = ω2 Fe−2u(ρ) − (mf − fφ(ρ))2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (18) B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Dimensionless equations of motion and boundary conditions In order to simplify the numerical integrations, as well as physical intuition, it is convenient writing the field equations in terms of dimensionless quantities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' To this end, we define x = kF mf , y = φ φ0 , r = ρµ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (19) Therefore, the potential and kinetic terms become U = µ2φ2 0 �1 2y2(1 − y)2 � ≡ µ2φ2 0 ˜U(y), V = µ2φ2 0 �1 2e−2v(r)(∂ry)2 � ≡ µ2φ2 0 ˜V (y), (20) while Eqs.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (5)-(7) can be computed analytically as W = 2 (2π)3 � kF(ρ) 0 d3k � k2 + (mf − fφ(ρ))2 = m4 eff 8π2 � s � 1 + s2(1 + 2s2) − log � s + � s2 + 1 �� ≡ m4 f ˜W(x,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' y),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (21a) P = 2 (2π)3 � kF(ρ) 0 d3k k2 3 � k2 + (mf − fφ(ρ))2 = m4 eff 8π2 � s �2 3s2 − 1 � � 1 + s2 + log � s + � s2 + 1 �� ≡ m4 f ˜P(x,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' y),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (21b) S = 2 (2π)3 � kF(ρ) 0 d3k mf − fφ(ρ) � k2 + (mf − fφ(ρ))2 = m3 eff 2π2 � s � 1 + s2 − log � s + � s2 + 1 �� ≡ m3 f ˜S(x,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' y),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (21c) where ˜W,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' ˜P,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' ˜S are dimensionless quantities and we intro- duced s ≡ x/(1 − y) for convenience.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Remarkably, these expressions are the same as in the standard case of a minimally coupled degenerate gas with the substitution 4 mf → meff.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' As we shall discuss in Appendix A, this property will be important when comparing this model to a scalar-tensor theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Note that the massless limit, meff → 0, should be taken carefully as not all the dependence on meff is expressed in the dimensional prefactor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' By performing the first integrals in Eqs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (21a)-(21c) in the meff → 0 limit, we obtain W = P/3, as expected for an ultrarelativistic degenerate gas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' It is convenient to further introduce the dimensionless combination of parameters Λ = √ 8πφ0 mp , η = mf µ1/2φ1/2 0 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (22) Finally, the field equations (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' the Einstein-Klein-Gordon equations with the addition of the Fermi momentum equation) take the compact form e−2v − 1 − 2e−2vr∂rv = −Λ2r2 � η4 ˜W + ˜U + ˜V � , e−2v − 1 + 2e−2vr∂ru = Λ2r2 � η4 ˜P − ˜U + ˜V � , e−2v� ∂2 ry + � ∂ru − ∂rv + 2 r � ∂ry � = ∂ ˜U ∂y − η4 ˜S, x2 = ˜ω2 Fe−2u(r) − (1 − y)2, (23) where ˜U, ˜V , ˜P, ˜W, and ˜S depend on x, y, and r, and we also introduced ˜ωF = ωF/mf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Static and spherically symmetric configurations in the model (1) are solutions to the above system of ordinary differential equations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' For clarity, we summarise the relevant parameters in Table I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Absence of φ = const solutions Note that, because U = 0 = dU/dφ in both degen- erate vacua, it is natural to first check what happens when φ = φ0 = const or if φ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' The former case (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' y(ρ) = 1) is an exact solution of the scalar equation and reduces the Einstein’s equations to those of gravity cou- pled to a degenerate gas of massless (since meff(φ0) = 0) fermions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' In this case, self-gravitating solutions do not have a finite radius [19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' On the other hand, due to the Yukawa coupling, in the presence of a fermion gas φ = 0 is not a solution to the scalar field equation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Thus, self-gravitating solutions to this model must have a nonvanishing scalar-field profile.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' In particular, we will search for solutions that (approximately) interpolate be- tween these two vacuum states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Boundary conditions at ρ = 0 Regularity at the center of the star (ρ = 0) imposes the following boundary conditions v(r = 0) = 0, u(r = 0) = 0, y(r = 0) = 1 − ϵ, ∂ry(0) = 0, TABLE I: List of the model parameters, the fermion soliton star parameters, and the dimensionless quantities adopted to express the system of equations in compact form.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Due to the condition in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (4), in our case only three model parameters are independent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Model parameters µ Scalar field mass φ0 VEV of the false vacuum mf Fermion mass f Yukawa coupling Solution parameters (boundary conditions) Pc Fermion central pressure ϵ = 1 − φ/φ0 Central scalar field displacement Dimensionless parameters/variables Λ = √ 8πφ0/mp Dimensionless VEV of the false vacuum η = mf/√µφ0 Scale ratio x = kF/mf Fermi momentum y = φ/φ0 Scalar field r = ρµ Rescaled radius ˜P(r = 0) = ˜Pc,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (24) where ϵ > 0 will be fixed numerically through a shooting procedure in order to obtain asymptotic flatness.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' In practice, in a large region of the parameter space one obtains ϵ ≪ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' In this limit, using Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (21b) and Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (18), we find ˜ωF ≡ ωF mf = (12π2 ˜Pc)1/4+ 3 4(12π2 ˜Pc)1/4 ϵ2+O(ϵ3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (25) In general, ˜ωF is fixed in terms of the central values of the pressure and scalar field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Finally, since a shift u(ρ) → u(ρ) + const in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (23) merely corresponds to a shift of the fermionic central pressure, we have imposed u(ρ = 0) = 0 without loss of generality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Definitions of mass, radius, and compactness We define the mass of the object as M = m(ρ → +∞) G , (26) where the function m(ρ) is related to the metric coefficient v(ρ) by e2v(ρ) = 1 − 2m(ρ)/ρ, and can be interpreted as the mass-energy enclosed within the radius ρ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' In terms of the dimensionless variables introduced in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (19), it is convenient to define ˜m(r) = µm(ρ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Thus, one obtains µM m2p = ˜m(r).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (27) 5 Notice that, in the asymptotic limit r → ∞, Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (27) becomes independent of the radius.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Typically, the radius of a star is defined as the value of the radial coordinate at the point where pressure drops to zero.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' As we shall discuss, in our case the fermion soliton stars will be characterised by a lack of a sharp boundary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Analogously to the case of boson stars [9], one can define an effective radius R within which 99% of the total mass is contained.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' (As later discussed, we shall also define the location Rf where only the pressure of the fermion gas vanishes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=') Finally, we can define the compactness of the star as GM/R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' SOME PRELIMINARY THEORETICAL CONSIDERATIONS Before solving the full set of field equations numerically, in this section we provide some theoretical considerations that might be useful to get a physical intuition of the model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' On the crucial role of fermions for the existence of solitonic stars 1.' 
III. SOME PRELIMINARY THEORETICAL CONSIDERATIONS

Before solving the full set of field equations numerically, in this section we provide some theoretical considerations that might be useful to get a physical intuition of the model.

A. On the crucial role of fermions for the existence of solitonic stars

1. Classical mechanics analogy

In order to understand why the presence of fermions in this theory plays a crucial role for the existence of stationary solutions, it is useful to study a classical mechanics analogy for the dynamics of the scalar field [6]. For the moment we consider flat spacetime. Furthermore, we start by ignoring the fermions (we will relax this assumption later on). The set of Eqs. (23) drastically simplifies to a single field equation

  ∂²_ρ φ + (2/ρ) ∂_ρ φ − ∂U/∂φ = 0 .   (28)

To make the notation more evocative of a one-dimensional mechanical system, we rename

  ρ → t ,  φ(ρ) → φ(t) ,  Û := −U ,   (29)

in such a way that the equation of motion becomes

  φ''(t) = −∂Û/∂φ − (2/t) φ'(t) ,   (30)

which describes the one-dimensional motion of a particle with coordinate φ(t) in the presence of an inverted potential, Û, and a velocity-dependent dissipative force, −(2/t) φ'(t). Within this analogy, the boundary (or initial) conditions (24) simply become

  φ(t = 0) = φ_0 − δφ ,  φ'(t = 0) = 0 ,   (31)

where φ_0 is the position of the false vacuum and δφ = ϵ φ_0.

FIG. 1: Inverted potential with degeneracy (blue line, our case) and without degeneracy between vacua (orange).

As we impose zero velocity at t = 0, the initial energy is
E(0) = Û(φ_0 − δφ). The energy E(t) of the particle at a time t is obtained by subtracting the work done by the friction:

  E(t) − E(0) = L(t) ,   (32)

where

  L(t) = −2 ∫_0^t dt' φ̇²(t')/t' .   (33)

Note that, owing to the initial conditions, this integral is regular at t = 0. On the other hand, the existence of a solution with asymptotically zero energy requires the particle to arrive with zero velocity at φ = 0 for t → +∞. Therefore, we impose E(t → ∞) = 0. As the total energy loss due to friction is L(t → ∞), the latter condition means

  E(0) = −L(t → ∞) ,   (34)

that is

  Û(φ_0 − δφ) = 2 ∫_0^∞ dt' φ̇²(t')/t' .   (35)

This equation can be interpreted as an equation for δφ in order to allow for the existence of a "bounce" solution³.

³ A bounce solution is one that asymptotically reaches the true vacuum with zero energy, after having "bounced" at the minimum of the inverted potential.

One can demonstrate the existence of such a solution heuristically. Let us first consider a slightly modified version of the inverted potential without degeneracy (orange curve in Fig. 1). Obviously, if the motion starts exactly at φ_0 with zero velocity, the particle would remain at rest. However, if we start on the left of the maximum, the particle will roll down, bounce, and eventually climb the leftmost hill shown in Fig. 1. Now, if the dynamics starts too far from φ_0 (still on the left of the maximum), with zero initial velocity it might not have enough energy to reach the zero-energy point at φ = 0.
Similarly, if the dynamics starts too close to φ_0, the particle might reach φ = 0 with positive energy and overcome the hill, rolling up to φ → −∞. By continuity, there must exist a unique point such that the total energy loss due to friction compensates the initial gap of energy with respect to the energy of φ = 0. However, by applying the same argument to our degenerate case (blue curve in Fig. 1), it is easy to see that there is no solution to Eq. (35)⁴. This is because the energy loss due to friction is nonzero, so the particle will never reach φ = 0 and is doomed to roll back in the potential, eventually oscillating around the minimum of Û. This shows that, in the degenerate case considered in this work, a simple scalar model does not allow for bounce solutions in flat spacetime.

If we now reintroduce fermions in the theory, the scalar field equation reads (still in flat spacetime)

  φ''(t) = −∂Û/∂φ − (2/t) φ'(t) − f S .   (36)

Since S ≥ 0, the fermions act with a force pushing our particle towards the origin, potentially giving the right kick to allow the particle to reach φ = 0 asymptotically. As we shall see, this also requires S = 0 (i.e., no fermions) around the origin, in order for the particle to reach a stationary configuration at φ = 0. This simple analogy shows how the presence of the fermions is fundamental, as it allows the solution to exist.
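This mechanism is easy to visualise numerically. The sketch below integrates the analogue equation (30) for a representative degenerate double well U = λφ²(φ − φ_0)² (an illustrative choice, not necessarily the potential (2) of the paper), with an optional constant push standing in for the fermion term −fS near the core; all numbers are arbitrary.

```python
import numpy as np
from scipy.integrate import solve_ivp

phi0 = 1.0   # position of the false vacuum (arbitrary units)
lam  = 1.0   # illustrative normalisation of the degenerate double well

def dU(phi):
    """dU/dphi for the representative potential U = lam * phi^2 (phi - phi0)^2."""
    return 2.0 * lam * phi * (phi - phi0) * (2.0 * phi - phi0)

def rhs(t, y, source):
    """Eq. (30) with an optional constant 'fermion' push acting only where phi > phi0/2."""
    phi, dphi = y
    S = source if phi > 0.5 * phi0 else 0.0
    return [dphi, dU(phi) - 2.0 * dphi / t - S]   # phi'' = -dUhat/dphi - (2/t) phi' - S, Uhat = -U

def reaches_origin(t, y, source):                 # stop the integration if phi crosses zero
    return y[0]
reaches_origin.terminal = True
reaches_origin.direction = -1

def evolve(eps, source=0.0, t_max=200.0):
    return solve_ivp(rhs, (1e-6, t_max), [phi0 * (1.0 - eps), 0.0],
                     args=(source,), events=reaches_origin, rtol=1e-8, atol=1e-10)

for src in (0.0, 1.0):
    sol = evolve(eps=1e-2, source=src)
    print(f"source={src}: reaches phi = 0 -> {sol.t_events[0].size > 0}")
# Without the source the field stays trapped near the maximum of U; with a strong enough
# push it reaches (and generically overshoots) phi = 0, which is why eps must be tuned
# by shooting to obtain a bounce.
```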
In the following section we will show how this is realised in the full theory, which includes gravitational effects. Furthermore, we will show that, in certain regions of the parameter space, relativistic effects are in fact crucial for the existence of the solution, since the latter requires a minimum fermionic pressure to exist.

2. Evading the no-go theorem for solitons

The above conclusions, deduced from our simple heuristic picture, hold also in the context of General Relativity. Indeed, without fermions in the system of Eqs. (23), and since our potential (2) is nonnegative, a general theorem proves that no axially symmetric and stationary solitons (that is, asymptotically flat, localized and everywhere regular solutions) can exist [10, 11]. However, the presence of fermions evades one of the hypotheses of the theorem. As we will show, in this case stationary solitons generically exist also for a real scalar field (at variance with the case of boson stars, which require complex scalars) and for a wide choice of the parameters.

⁴ At least if we look for a solution in which the scalar field makes the transition at a finite time.

B. Scaling of the physical quantities in the µR ≫ 1 regime

Assuming µR ≫ 1, it is possible to derive an analytical scaling for various physical quantities, as originally derived in Ref. [20] and similar in spirit to Landau's original computation for ordinary neutron stars (see, e.g., [19]). It is instructive to consider the following toy model in the absence of gravity. We consider a theory with an additive quantum number N, carried by a spin-1/2 field ψ.
We then add a real scalar field φ with the usual potential described in Eq. (2). Such a scalar field obeys Eq. (28), along with the initial condition (31). Since µR ≫ 1, its solution is well approximated by a stiff Fermi function [13, 20],

  φ(ρ) ≈ φ_0 / (1 + e^{µ(ρ−R)}) .   (37)

The definition of k_F is (we take Eq. (18) with u = 0, since we work in the absence of gravity)

  k_F²(ρ) = ω_F² − (m_f − f φ(ρ))² .   (38)

Because of Eq. (37), the Fermi momentum is nearly fixed to the constant value ω_F for ρ ≲ R, and for ρ ≈ R it goes to zero stiffly. Therefore, the field ψ is approximately confined within the sphere of radius R. We assume that the quanta of ψ are noninteracting, massless (consistently with the fact that we are interested in configurations in which the fermions are approximately massless in the core of the star) and described by Fermi statistics at zero temperature. Thus, we obtain the standard relation for the particle density,

  n = #particles / unit volume = (2/(8π³)) ∫_0^{k_F} 4πk² dk = ω_F³/(3π²) .   (39)

Since k_F ≃ ω_F = const, the total number of particles is

  N = n ∫_0^R 4πρ² dρ = (4/(9π)) (R ω_F)³ .   (40)

The fermion energy is

  E_f = ∫_0^R 4πρ² dρ W = (3π)^{1/3} (3N/4)^{4/3} (1/R) ,   (41)

where

  W = energy / unit volume = (2/(8π³)) ∫_0^{k_F} 4πk² dk · k = ω_F⁴/(4π²) .   (42)
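These textbook zero-temperature relations are easy to verify numerically; the sketch below (with arbitrary test values of ω_F and R) checks Eqs. (39)-(42) by direct momentum integration and recovers the 1/R form of Eq. (41).

```python
import numpy as np
from scipy.integrate import quad

def fermi_gas_massless(omega_F):
    """Number density and energy density of a massless degenerate Fermi gas (g = 2),
    integrated over momenta up to k_F = omega_F, cf. Eqs. (39) and (42)."""
    n, _ = quad(lambda k: 2.0 / (8.0 * np.pi**3) * 4.0 * np.pi * k**2, 0.0, omega_F)
    W, _ = quad(lambda k: 2.0 / (8.0 * np.pi**3) * 4.0 * np.pi * k**3, 0.0, omega_F)
    return n, W

omega_F, R = 2.0, 10.0                        # illustrative test values
n, W = fermi_gas_massless(omega_F)
print(np.isclose(n, omega_F**3 / (3*np.pi**2)), np.isclose(W, omega_F**4 / (4*np.pi**2)))

N  = n * 4.0/3.0 * np.pi * R**3               # Eq. (40)
Ef = W * 4.0/3.0 * np.pi * R**3               # fermion energy inside the sphere
print(np.isclose(Ef, (3*np.pi)**(1/3) * (0.75*N)**(4/3) / R))   # Eq. (41)
```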
The energy associated to the scalar field φ is instead

  E_s = ∫_0^R 4πρ² dρ (U + V) ≃ (µφ_0²/6) 4πR² ,   (43)

where we have used the fact that

  (12/(µφ_0²)) U ≃ (12/(µφ_0²)) V ≃ δ(ρ − R) ,   (44)

which can be shown using Eq. (37) and µR ≫ 1. The total energy of our configuration is

  E = E_f + E_s ,   (45)

while the radius can be found by imposing ∂E/∂R = 0, yielding

  R = [ (3/(4π)) (3π)^{1/3} (3N/4)^{4/3} ]^{1/3} [1/(µφ_0²)]^{1/3}   (46)

and the mass

  M = E(R) = 12πR² (µφ_0²/6) .   (47)

From Eqs. (46) and (47), we get

  R ∼ N^{4/9} ,  M ∼ N^{8/9} .   (48)

Thus, at least for large N, the mass of the soliton is lower than the energy of the sum of N free particles, ensuring stability. In the absence of gravity, M can be arbitrarily large. However, due to relativistic effects we expect the existence of a maximum mass beyond which the object is unstable against radial perturbations. We expect that gravity becomes important when 2GM/R ∼ 1. Therefore, the critical mass M_c can be estimated by simply imposing R ∼ 2GM_c in Eq. (47), yielding G²M_c ∼ 1/(µφ_0²) and thus

  µM_c/m_p² ∼ 1/Λ² .   (49)

Likewise, one can obtain the scaling of all other relevant quantities, which we collect in Table II.

TABLE II: Analytical scalings of some physical quantities at the maximum mass M_c in the µR ≫ 1 limit.

  Mass:              µM_c/m_p² ∼ 1/Λ²
  Radius:            µR_c ∼ µM_c/m_p² ∼ 1/Λ²
  ˜ω_F:              ˜ω_F^c ∼ (µ/m_p)^{1/2} (φ_0/m_f) ∼ Λ^{1/2}/η
  Central pressure:  ˜P_c ∼ (˜ω_F^c)⁴ ∼ Λ²/η⁴
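The scalings (48)-(49) follow from minimising E(R) = E_f + E_s and then estimating when gravity becomes important; a short numerical sketch in units µ = φ_0 = G = 1, with illustrative particle numbers N, reproduces both steps.

```python
import numpy as np
from scipy.optimize import minimize_scalar, brentq

mu, phi0, G = 1.0, 1.0, 1.0     # illustrative units: mu = phi0 = G = 1

def E_tot(R, N):
    """E = E_f + E_s with E_f from Eq. (41) and E_s from Eq. (43)."""
    Ef = (3*np.pi)**(1/3) * (0.75*N)**(4/3) / R
    Es = (mu * phi0**2 / 6.0) * 4.0 * np.pi * R**2
    return Ef + Es

def equilibrium(N):
    """Radius and mass of the gravity-free soliton, Eqs. (46)-(47)."""
    res = minimize_scalar(lambda R: E_tot(R, N), bounds=(1e-3, 1e6), method='bounded')
    return res.x, E_tot(res.x, N)

# Check R ~ N^{4/9} and M ~ N^{8/9}, Eq. (48)
(R1, M1), (R2, M2) = equilibrium(1e4), equilibrium(1e6)
print(np.log(R2/R1)/np.log(1e2), np.log(M2/M1)/np.log(1e2))   # ~0.444, ~0.889

# Critical mass from R(M) ~ 2 G M_c, with M(R) = 2 pi mu phi0^2 R^2 from Eq. (47)
Mc = brentq(lambda M: 2.0*G*M - np.sqrt(M/(2*np.pi*mu*phi0**2)), 1e-6, 1e6)
print(Mc, 1.0/(8*np.pi*mu*phi0**2*G**2))    # both ~ 1/(8 pi), cf. Eq. (49)
```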
1. Self-consistency criteria

When deducing the scalings reported in Table II, we made the following assumptions: i) µR ≫ 1; ii) a gas of massless fermions in the interior of the star.

In practice, the first assumption is not restrictive. Indeed, since µ⁻¹ is the Compton wavelength of the scalar boson, in the context of a classical field theory we should always impose µR ≫ 1. In other words, if µR ≃ 1 the quantum effects of the scalar field become important on the scale of the star and one cannot trust the classical theory anymore. The hypothesis µR ≫ 1 is an essential ingredient in order to approximate the scalar field profile with Eq. (37), and to assume, as a consequence, that k_F is a step function. Besides, it guarantees that the energy density of the scalar field is close to a delta function. Using the scalings reported in Table II, condition i) implies Λ ≪ 1.

One may worry that the second assumption can be violated, since the scalar field is not located exactly at φ_0 at the origin ρ = 0, and therefore the fermions are never exactly massless. It is enough to check that the fermion gas is very close to a massless gas. Let us recall that the effective mass of the fermion is defined as

  m_eff(ρ) = m_f (1 − φ(ρ)/φ_0)   (50)

and therefore m_eff(ρ = 0) = m_f ϵ. We can say that the fermion gas is effectively massless when W/P = 3. From Eqs. (5) and (6), at the lowest order in ϵ one obtains

  W/P = 3 (1 + 2 m_f² ϵ²/k_F²) + O(ϵ³) ,   (51)

which indicates that we should require

  2 m_f² ϵ²/k_F² ≪ 1   (52)

in the vicinity of the origin, ρ ≃ 0.
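The expansion (51) can be checked against the exact zero-temperature energy density and pressure of a degenerate Fermi gas carrying the small effective mass m_eff = m_f ϵ; a minimal numerical check with illustrative numbers is given below.

```python
import numpy as np
from scipy.integrate import quad

def W_over_P(kF, m):
    """Exact ratio of energy density to pressure for a degenerate Fermi gas (g = 2)."""
    W, _ = quad(lambda k: k**2 * np.sqrt(k**2 + m**2) / np.pi**2, 0.0, kF)
    P, _ = quad(lambda k: k**4 / (3.0 * np.pi**2 * np.sqrt(k**2 + m**2)), 0.0, kF)
    return W / P

mf, eps, kF = 1.0, 0.05, 2.0                          # illustrative values
exact  = W_over_P(kF, mf * eps)                       # effective mass m_eff = mf*eps at the centre
approx = 3.0 * (1.0 + 2.0 * (mf * eps)**2 / kF**2)    # Eq. (51)
print(exact, approx)                                  # agree up to higher-order corrections
```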
At larger radii, the scalar field gradually moves away from the central configuration and the fermions start retaining a bare mass. Inserting Eq. (38) in the previous condition and using Eq. (25) provides the condition we need to enforce to obey assumption ii), i.e.

  2 m_f² ϵ² / (12π² P_c)^{1/2} ≪ 1 .   (53)

We express ϵ using the scalar field profile approximation in Eq. (37). Indeed, with simple manipulations, one finds

  −log ϵ = µR ≫ 1 .   (54)

Substituting (54) in (53), and neglecting, at this stage, the numerical factors, one obtains

  log( m_f / P_c^{1/4} ) ≪ µR .   (55)

Using the scaling relations in Table II, we obtain

  log( η / Λ^{1/2} ) ≪ 1/Λ² .   (56)

Summing up, the following conditions on the parameters,

  Λ ≪ 1 ,   (57)
  log( η / Λ^{1/2} ) ≪ 1/Λ² ,   (58)

are our self-consistency criteria to check whether we are in a regime in which the scalings reported in Table II are expected to be valid. While it can be shown that the second condition implies the first, we prefer writing both for the sake of clarity. Notice that, for fixed Λ ≪ 1, one can violate (58) for increasing values of η, but only logarithmically.
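In practice, Eqs. (57)-(58) provide a quick filter on candidate parameter choices before attempting a full numerical solution. A minimal helper is sketched below; the margin factor quantifying "≪" is an arbitrary illustrative choice, not a threshold from the paper.

```python
import numpy as np

def self_consistent(Lambda, eta, margin=10.0):
    """Check the criteria (57)-(58): Lambda << 1 and log(eta/sqrt(Lambda)) << 1/Lambda^2.
    `margin` sets how much smaller the left-hand sides must be (illustrative choice)."""
    c1 = Lambda < 1.0 / margin
    c2 = np.log(eta / np.sqrt(Lambda)) < (1.0 / Lambda**2) / margin
    return c1 and c2

for Lam, eta in [(0.05, 1.0), (0.5, 1.0), (0.05, 1e18)]:   # illustrative points
    print(Lam, eta, self_consistent(Lam, eta))
```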
2. Confining and deconfining regimes

An important consequence of the scalings collected in Table II is that the critical mass and radius are independent of η at fixed Λ. We shall call the region of the parameter space where this happens the confining regime of the solutions. Indeed, in this regime the size of the soliton is dictated by the parameters of the scalar field, i.e. µ and φ_0, regardless of the value of the fermion mass m_f. Physically, we expect this to be the case when there exists a hierarchy between the scalar and fermion parameters. Since this hierarchy is measured by η, we expect that the confining regime exists only when η is larger than a critical value, η_c.

To better clarify this point, we consider again Eq. (18) for the Fermi momentum,

  k_F²(ρ) = ω_F² e^{−2u(ρ)} − m_f² (1 − φ(ρ)/φ_0)² .   (59)

In the m_f → 0 limit this quantity becomes positive definite, and so the fermionic pressure cannot vanish at any finite radius. In other words, the radius of the star can be arbitrarily large, provided that m_f is sufficiently small. This is nothing but the well-known fact that a star made of a purely relativistic gas does not exist. Hence, if we enter a regime where the fermion bare mass m_f is so small that, even after the scalar field has moved away from the false vacuum (where the effective fermion mass is small by construction), the Fermi gas is still relativistic, then the radius of the star grows fast and a small variation in m_f produces a big variation in the radius. We call this regime the deconfining regime of the solution. In terms of the dimensionless variables defined above, the m_f → 0 limit becomes

  ˜ω_F → ∞ .   (60)
Therefore, we expect that, for a given choice of (Λ, η), the confining regime exists only if ˜ω_F^c is smaller than a certain value. Using the scaling for ˜ω_F^c in Table II, this can be translated into the condition

  Λ^{1/2}/η < C ,   (61)

where C is a constant that has to be determined numerically. At this point, it is natural to define η_c as the value of η at which Eq. (61) is saturated. In this way, Eq. (61) becomes

  η > η_c = C Λ^{1/2} .   (62)

To summarize, when η ≳ η_c (confining regime) the size of the soliton near the maximum mass is mostly determined by the properties of the scalar field, whereas it strongly depends on the fermion mass when η ≲ η_c (deconfining regime⁵).

C. Energy conditions

For an energy-momentum tensor of the form

  T^µ_ν = diag{−ρ, P_1, P_2, P_3} ,   (63)

the energy conditions take the following form:

  Weak energy condition: ρ ≥ 0 and ρ + P_i ≥ 0;
  Strong energy condition: ρ + Σ_i P_i ≥ 0 and ρ + P_i ≥ 0;
  Dominant energy condition: ρ ≥ |P_i|.

For a spherically symmetric configuration, P_1 = P_r is the radial pressure, while P_2 = P_3 = P_t is the tangential pressure. For our model,

  ρ = U + V + W ,   (64)
  P_r = V − U + P ,   (65)
  P_t = −U − V + P .   (66)

Since V, W, P are nonnegative quantities, we obtain ρ + P_r ≥ 0 and ρ + P_t ≥ 0. Thus, the weak and strong energy conditions are satisfied if

  U + V + W ≥ 0 ,   (67)
  3P − 2U + W ≥ 0 ,   (68)

respectively. Since also U is a nonnegative quantity, the weak energy condition is always satisfied, while the strong energy condition can be violated. In particular, it is violated even in the absence of fermions (P = W = 0).
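Given radial profiles of U, V, W and P from a numerical solution, these conditions are straightforward to monitor pointwise; a minimal checker with placeholder inputs could look as follows.

```python
import numpy as np

def energy_conditions(U, V, W, P):
    """Pointwise check of Eqs. (67)-(70) for nonnegative arrays U, V, W, P,
    using the fluid decomposition of Eqs. (64)-(66)."""
    rho, Pr, Pt = U + V + W, V - U + P, -U - V + P
    weak     = np.all(rho >= 0) and np.all(rho + Pr >= 0) and np.all(rho + Pt >= 0)
    strong   = np.all(rho + Pr >= 0) and np.all(rho + Pt >= 0) and np.all(3*P - 2*U + W >= 0)
    dominant = np.all(rho >= np.abs(Pr)) and np.all(rho >= np.abs(Pt))
    return weak, strong, dominant

# Placeholder profiles: a scalar-only configuration (P = W = 0) violates the strong condition
U, V, W, P = np.array([1.0]), np.array([0.5]), np.array([0.0]), np.array([0.0])
print(energy_conditions(U, V, W, P))   # (True, False, True)
```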
⁵ Note that, deep in the deconfining regime (when η → 0), the Compton wavelength of the fermion, 1/m_f, might become comparable to or larger than the radius of the star. In this case we expect the Thomas-Fermi approximation to break down.

FIG. 2: Radial profiles of the dimensionless pressure ˜P, the scalar profile y, and the metric functions u and v for two example configurations. Left: Λ = 0.141, η = 1.26, and ˜P_c = 0.00903. The mass and radius of the fermion soliton star are µM/m_p² = 6.14 and µR = 33.8, respectively. This solution falls within the confining regime. Right: Λ = 0.141, η = 0.996, and ˜P_c = 0.0222. The mass and radius of the fermion soliton star are µM/m_p² = 5.71 and µR = 39.3, respectively. This solution falls within the deconfining regime.
The dominant energy condition, instead, gives two inequalities:

  U + V + W ≥ |P + V − U| ,   (69)
  U + V + W ≥ |P − V − U| .   (70)

One can show that the dominant energy condition is satisfied whenever

  W + 2(U + V) ≥ P .   (71)

This inequality is satisfied if

  W − P ≥ 0 ,   (72)

which can be shown to be true using the analytic expressions of W and P. To sum up, the weak and dominant energy conditions are always satisfied, while the strong energy condition can be violated (e.g. in the absence of fermions), as is generically the case for a scalar field with a positive potential [11].

IV. NUMERICAL RESULTS

In this section we present the fermion soliton solutions in spherical symmetry, obtained by integrating the field equations (23). We will confirm the existence of a solution beyond the thin-wall approximation used in Ref. [13] (example solutions are shown in Fig. 2). Also, based on the numerical solutions, we are able to confirm the scalings derived in the previous sections in a certain region of the parameter space and to fix their prefactors.

A. Numerical strategy

In this section we summarise the numerical strategy we adopt to find fermion soliton solutions. Given the boundary conditions (24), the set of equations (23) is solved numerically by adopting the following strategy (a schematic code sketch of this procedure is given at the end of this subsection):
1. We fix a certain value of ˜P_c;

2. for a given value of ˜P_c and of the central scalar field (i.e., a value of ϵ), we obtain ˜ω_F, and therefore x, through the last equation in (23);

3. for fixed ˜P_c and ϵ, we integrate the first three equations in (23) for the variables (u, v, y), starting from r ≈ 0 up to the point r = R_f where the fermion pressure drops to negligible values, ˜P(R_f) = 0;

4. we eliminate the fermionic quantities from the system of equations (23) and start a new integration with initial conditions given at r = R_f, imposing continuity of the physical quantities. That is, the initial conditions on the metric and scalar field at r = R_f are obtained from the last point of the previous integration up to r = R_f;

5. we use a shooting method to find the value of ϵ that allows an asymptotically flat solution to exist, which means imposing y(r → ∞) → 0;

6. as previously discussed, because the scalar field does not have compact support, we define the radius of the star (R > R_f) as that containing 99% of the total mass, i.e. ˜m(R) = 0.99 µM/m_p²
(Eq. (27)), and the compactness is GM/R;
7. Finally, we repeat the procedure for a range of values of ˜P_c, finding a one-parameter family of solutions. As we shall discuss, in certain regimes (including the deconfining one) this family exists only if ˜P_c is above a certain threshold, therefore lacking a Newtonian limit.

FIG. 3: Mass-radius (left panels) and compactness-mass (right panels) diagrams for fermion soliton stars. The top panels refer to various values of (Λ, η) in the confining regime (η > η_c, see Sec. III B 2). As a reference, in the top-left panel we also draw the lines R = 2GM, R = (9/4)GM, and R = 3GM, corresponding to the Schwarzschild radius, the Buchdahl limit [21], and the photon-sphere radius. The bottom panels refer to various values of η for fixed Λ = 0.141. The smallest value of η considered is near to, but greater than, the critical value. The inset shows the curves in logarithmic scale, to highlight that in this case there exists a turning point in the M-R diagram at low masses that proceeds towards the Newtonian limit of small M and large R.

As already noted, a vanishing scalar field (y = 0, ∂_r y = 0) is a solution to the scalar equation in Eq. (23) only if S = 0, that is, in the absence of fermions. This ensures that in any solution with y → 0 at infinity the fermion pressure must vanish at some finite radius. Therefore, the fermion soliton solution is described by a fermion fluid confined at r ≤ R_f and endowed with a real scalar field that is exponentially suppressed outside the star, as expected from the discussion in Sec. III.

As described in the previous section, important parameters are the mass and radius of the critical solution, M_c and R_c. In practice, we compute these quantities by identifying in the M-R diagram the point of maximum mass.
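Schematically, the strategy above is a standard one-parameter shooting problem in ϵ. The skeleton below shows only the control flow of steps 3-5; `rhs_with_fermions`, `rhs_vacuum`, and the choice of shooting diagnostic are placeholders to be filled in with the actual system (23), which is not reproduced here.

```python
import numpy as np
from scipy.integrate import solve_ivp
from scipy.optimize import brentq

def solve_star(P_c, eps, rhs_with_fermions, rhs_vacuum, r_max=100.0):
    """Steps 3-4: integrate with fermions until the pressure vanishes (event at r = R_f),
    then restart the fermion-free integration from R_f with continuous (u, v, y)."""
    def pressure_vanishes(r, s):      # state s = (u, v, y, P_tilde)
        return s[3]
    pressure_vanishes.terminal = True

    inner = solve_ivp(rhs_with_fermions, (1e-6, r_max), [0.0, 0.0, 1.0 - eps, P_c],
                      events=pressure_vanishes, rtol=1e-10, atol=1e-12)
    Rf = inner.t[-1]
    outer = solve_ivp(rhs_vacuum, (Rf, r_max), inner.y[:3, -1], rtol=1e-10, atol=1e-12)
    return inner, outer

def shoot_eps(P_c, rhs_with_fermions, rhs_vacuum, eps_lo=1e-8, eps_hi=1e-1):
    """Step 5: tune eps so that the scalar field y -> 0 at large r (asymptotic flatness);
    here the sign of y at r_max is used as the shooting diagnostic."""
    def mismatch(eps):
        _, outer = solve_star(P_c, eps, rhs_with_fermions, rhs_vacuum)
        return outer.y[2, -1]
    return brentq(mismatch, eps_lo, eps_hi)
```

Steps 6-7 then reduce to post-processing ˜m(r) (e.g. with the 99%-mass radius defined earlier) and looping over ˜P_c.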
B. Fermion soliton stars

First of all, we confirm that fermion soliton stars exist also beyond the thin-wall approximation used in Ref. [13]. An example is shown in Fig. 2, which presents the radial profiles for the metric, scalar field, and fermion pressure. Inspecting both panels of Fig. 2 can help us understand the qualitative difference between solutions in the confining regime (left) and in the deconfining one (right). In the first case, as soon as the scalar field moves away from its central value at ρ → 0 and the effective mass of the fermion field grows, the pressure quickly drops to zero. This is reflected in the fact that the macroscopic size of the star R is found to be very close to where the scalar field starts moving away from the false vacuum. This is the reason why the macroscopic properties of the star are mainly dictated by the scalar field potential.

FIG. 4: Left: behaviour of the critical radius R_c with Λ and η. The scaling (62) is highlighted by the diagonal black dashed line. We observe agreement until Λ ≲ 0.3 whereas, for larger Λ, η_c increasingly exceeds the predicted value. The horizontal grid-line highlights where the µR > 1 regime ends. The shaded region above the two dashed lines is the confining regime. Right: behaviour of the critical mass M_c with Λ and η. We observe that the critical mass does not exhibit a significant change of behaviour for η < η_c.
In the latter case, the small bare mass of the fermions makes them remain ultra-relativistic even when the scalar field moves away from the false vacuum, generating a layer where the fermionic pressure drops exponentially but remains finite. After the energy of the fermions has fallen into the non-relativistic regime, the fermionic pressure rapidly vanishes. The existence of such a layer makes the final mass and radius of the star dependent on the fermion mass; see more details below. Also, as the numerical shooting procedure requires matching the asymptotic behavior of the scalar field outside the region where the energy density of the fermions remains sizeable, deconfining solutions are characterized by a larger tuning of the parameter controlling the central displacement ϵ.

In Fig. 3 we present the mass-radius and compactness-mass diagrams for various values of Λ and η in the confining regime. In the top panels, we observe that Λ strongly affects the mass-radius scale and the maximum mass, while from the bottom panels we observe that η has a weaker impact on the maximum mass, as expected from the discussion in Sec. III.

The dependence of M_c and R_c on Λ and η is presented in Fig. 4. As expected, we observe that, for fixed Λ, there is a critical value of η below which the radius begins to grow rapidly. For η > η_c and Λ ≲ 0.5, we observe that the predictions given in Sec. III are valid, confirming the existence of a confining regime.
In Fig. 3 we present the mass-radius and compactness-mass diagrams for various values of Λ and η in the confining regime. In the top panels, we observe that Λ strongly affects the mass-radius scale and the maximum mass, while from the bottom panels we observe that η has a weaker impact on the maximum mass, as expected from the discussion in Sec. III.

The dependence of Mc and Rc on Λ and η is presented in Fig. 4. As expected, we observe that, for fixed Λ, there is a critical value of η below which the radius begins to grow rapidly. For η > ηc and Λ ≲ 0.5, we observe that the predictions given in Sec. III are valid, confirming the existence of a confining regime. Indeed, in that region of the parameter space, both the mass and the radius depend only weakly on η. This dependence grows very slowly for increasing values of η, in agreement with Eq. (58). Moreover, the value of ηc scales, for Λ ≲ 0.3, in agreement with Eq. (62), while for larger values of Λ it exceeds the analytical scaling. At variance with the critical radius, the critical mass does not exhibit a change of behaviour for η < ηc. As a consequence, the compactness decreases quickly. Finally, in Table III we report the scaling coefficients computed numerically, which are valid in the confining regime (η ≳ ηc, Λ ≲ 0.5).

TABLE III: Scaling of the critical parameters, with coefficients derived numerically in the Λ ≲ 0.5 range.
  Critical mass:                          µ Mc/m_p^2 ≈ 0.19/Λ^2
  Critical radius:                        µ Rc ≈ 0.71/Λ^2
  Compactness of the critical solution:   Cc ≈ 0.27
  Critical value of the scale ratio:      ηc ≈ 2.7 Λ^(1/2)
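As a quick sanity check of how these fits are used, the snippet below evaluates the Table III scalings at Λ = 0.141, the value adopted in Fig. 5 further below; the Planck-mass factors are left symbolic. The ratio of the first two rows reproduces Cc ≈ 0.27, and the resulting ηc ≈ 1.0 is consistent with the top-panel value of η in Fig. 5 lying below ηc (deconfining) while η = 1.26 and 2.92 lie above it (confining).

    import math

    # Reading off the Table III fits at Lambda = 0.141 (the value used in
    # Fig. 5 below); m_p factors are kept symbolic, with G = 1/m_p^2.
    Lam = 0.141
    muMc = 0.19 / Lam**2          # mu*Mc/m_p^2  ~ 9.6
    muRc = 0.71 / Lam**2          # mu*Rc        ~ 35.7
    Cc = muMc / muRc              # G*Mc/Rc = 0.19/0.71 ~ 0.27 (third row)
    eta_c = 2.7 * math.sqrt(Lam)  # ~ 1.01 (fourth row)
    print(muMc, muRc, Cc, eta_c)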
C. On the existence of a Newtonian regime

From the bottom panels of Fig. 3, we observe that, even though η has a weak impact on the maximum mass, it can qualitatively change the M-R diagram, especially at low masses. Overall, the mass-radius diagram resembles that of solitonic boson stars [15–18], with several turning points in both the mass and the radius, giving rise to multiple branches (see also [22]). The main branch is the one with M'(R) > 0 before the maximum mass, which is qualitatively similar to that of strange (quark) stars [23, 24]. However, the low-mass behavior (and the existence of a Newtonian regime) depends strongly on η.

For sufficiently large values of η (always in the confining regime) there exists a low-compactness branch in which M'(R) < 0 and where the fermionic pressure is small compared to the energy density, giving rise to a Newtonian regime. However, an interesting effect starts occurring for values of η near to, but greater than, the critical one (e.g., the blue curve for η = 1.26 in the bottom panels of Fig. 3; see footnote 6), all the way down to the deconfining regime. In this case, there is still a lower turning point in the M-R diagram, but the compactness eventually starts growing (see the bottom right panel). In this case there is no Newtonian regime, since the compactness is never arbitrarily small.

[Footnote 6: Notice that, in the bottom left panel, it is not possible to see the complete tail of the M-R diagram. As underlined in the text, the complete M-R diagram is plotted in the center right panel of Fig. 5.]
This peculiar behavior is also related to another important feature of the model, namely the fact that, for η sufficiently small, fermion soliton stars exist only above a minimum threshold for the central fermionic pressure. We clarify this point in Fig. 5. In the left panels we show the mass of the star as a function of the central fermionic pressure for Λ = 0.141 and three values of η. For η = 0.966 and η = 1.26 (top and center panels), the pressure has a lower bound, corresponding to the absence of a Newtonian limit. For η = 2.92 (bottom panels) the behavior is qualitatively different and in this case the Newtonian regime is approached as Pc → 0.

To clarify where the minimum pressure and these multiple branches are in the mass-radius diagram, in the right panels of Fig. 5 we show data points for M-R using the same color scheme as in the corresponding left panels. Interestingly, the minimum pressure does not correspond to the minimum mass in Fig. 5, but it is an intermediate point in the M-R diagram. In the center right panel we show an extended version of the Λ = 0.141, η = 1.26 curve shown in Fig. 3. This highlights the peculiar behavior of the new branch, which has a further turning point at large radii. Studying the stability of these different peculiar branches [22] is left for future work. Finally, note that in both cases there are values of the central fermionic pressure corresponding to multiple solutions, each one identified by a different central value of the scalar field.
V. PARAMETER SPACE AND ASTROPHYSICAL IMPLICATIONS

Given the number of parameters of our model, it is interesting to study the characteristic mass and radius of fermion soliton stars in this theory. By defining

q ≡ (µ φ0^2)^(1/3),    (73)

as long as we are in the confining regime one finds

Mc ∼ 0.19/(8π) · m_p^4/q^3 ∼ 1.27 M⊙ (q/(5 × 10^5 GeV))^(-3),    (74)

Rc ∼ 0.71/(8π) · m_p^2/q^3 ∼ 6.5 km (q/(5 × 10^5 GeV))^(-3),    (75)

where we included the prefactors obtained using the numerical results.
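A quick numerical check of Eqs. (74)-(75) is sketched below. It evaluates the two expressions at q = 5 × 10^5 GeV in natural units (ħ = c = 1), using standard values (supplied here, not quoted in the text) for the Planck mass, the solar mass, and the GeV^-1 to km conversion; it reproduces the quoted 1.27 M⊙ and 6.5 km to within a few percent.

    import math

    # Evaluate Eqs. (74)-(75) at q = 5e5 GeV in natural units (hbar = c = 1).
    m_p = 1.2209e19             # Planck mass in GeV (G = 1/m_p^2)
    M_sun = 1.116e57            # solar mass in GeV
    km_per_GeVinv = 1.9733e-19  # 1 GeV^-1 ~ 1.9733e-16 m
    q = 5.0e5                   # GeV

    Mc = 0.19 / (8 * math.pi) * m_p**4 / q**3   # GeV
    Rc = 0.71 / (8 * math.pi) * m_p**2 / q**3   # GeV^-1
    print(Mc / M_sun)              # ~ 1.2 solar masses (Eq. 74 quotes ~ 1.27)
    print(Rc * km_per_GeVinv)      # ~ 6.6 km          (Eq. 75 quotes ~ 6.5)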
Given the cubic dependence on q, the model can accommodate compact objects of vastly different mass scales, while the compactness at the maximum mass is independent of q, GMc/Rc ∼ 0.27, which is slightly larger than that of a typical neutron star but still smaller than the compactness of the photon sphere. As a consequence, one expects fermion soliton stars to display a phenomenology more akin to ordinary neutron stars than to black holes [14]. The authors of Ref. [13] considered the value q = 30 GeV, yielding supermassive objects with Mc ∼ 10^12 M⊙ and Rc ∼ 10^13 km ∼ 0.3 pc. Instead, the choice

q = qastro ∼ 5 × 10^5 GeV    (76)

leads to the existence of soliton solutions of mass and radius comparable to those of ordinary neutron stars.

Furthermore, the fact that the model is in the confining regime only above a critical value of η, Eq. (62), implies (using Eq. (22) and our numerical results)

m_f > 2.7 (√(8π) q^3/m_p)^(1/2) ∼ 0.6 GeV (q/qastro)^(3/2),    (77)

a range including the neutron mass. Therefore, the fermion gas can be a standard degenerate gas of neutrons. It is also interesting to combine the above inequality (saturated when m_f = m_f^c) with Eq. (74), finding a relation between the maximum mass of the soliton in the confining regime and the critical fermion mass,

Mc ∼ 0.46 (GeV/m_f^c)^2 M⊙,    (78)

independently of q. Interestingly, this model allows for subsolar compact objects for fermions at (or slightly heavier than) the GeV scale, whereas it allows for supermassive (Mc ∼ 10^6 M⊙) compact stars for a degenerate gas of electrons (m_f^c ∼ 0.5 MeV).

Clearly, the same value of q can be obtained with different combinations of µ and φ0. In general,

µ = 500 (q/qastro)^3 (500 TeV/φ0)^2 TeV    (79)
  = 500 (m_f^c/(0.6 GeV))^2 (500 TeV/φ0)^2 TeV,    (80)

so µ ∼ GeV for q = qastro (or, equivalently, for m_f^c = 0.6 GeV) and φ0 ∼ 3 × 10^5 TeV.
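The snippet below runs the corresponding consistency checks on Eqs. (77)-(80) at q = qastro, again in natural units with a supplied value for the Planck mass: the minimum fermion mass comes out near 0.6 GeV, Eq. (78) evaluated at that mass is consistent with Eq. (74), a 0.511 MeV electron gas gives Mc of order 10^6 M⊙, and q^3 = µ φ0^2 gives µ of order a GeV for φ0 ∼ 3 × 10^5 TeV.

    import math

    # Consistency checks of Eqs. (77)-(80) at q = q_astro = 5e5 GeV.
    m_p = 1.2209e19                       # Planck mass, GeV
    q = 5.0e5                             # GeV

    mf_min = 2.7 * math.sqrt(math.sqrt(8 * math.pi) * q**3 / m_p)
    print(mf_min)                         # ~ 0.6 GeV (Eq. 77)

    print(0.46 / mf_min**2)               # ~ 1.2 M_sun at m_f^c ~ 0.6 GeV (cf. Eq. 74)
    print(0.46 / (0.511e-3)**2)           # ~ 1.8e6 M_sun for a degenerate electron gas

    phi0 = 3.0e5 * 1e3                    # 3e5 TeV expressed in GeV
    print(q**3 / phi0**2)                 # ~ 1.4 GeV, i.e. mu ~ GeV as stated above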
[FIG. 5 (plot panels not reproduced): Left panels: the mass of fermion soliton stars as a function of the central fermionic pressure. Right panels: the corresponding mass-radius diagram, using the same color scheme as in the left panels in order to associate to each point the corresponding central pressure. Top: Λ = 0.141 and η = 0.996; this solution is in the deconfining regime and there is a lower bound on P̃c below which no solution exists. Center: Λ = 0.141 and η = 1.26; this solution is in the confining regime but, also in this case, there exists a lower bound on P̃c. Bottom: Λ = 0.141 and η = 2.92; this solution is in the confining regime but, given the larger value of η, there is no lower bound on P̃c and a Newtonian regime exists. In all three cases, for a certain range of P̃c there are multiple solutions with the same central fermionic pressure and different central values of the scalar field.]
Note that the latter value is still much smaller than the Planck scale, so the condition Λ ≪ 1 is satisfied. From our numerical results, Eqs. (74)-(75) are valid as long as Λ ≲ 0.5 whereas, for larger values of Λ, Mc, Rc, and Cc decrease rapidly and the condition µR ≫ 1 might not hold (see Fig. 4). This gives an upper bound on φ0,

φ0 ≲ (0.5/√(8π)) m_p ∼ 10^18 GeV,    (81)

which, using Eq. (79), can be translated into a lower bound on µ,

µ ≳ 8.4 × 10^(-11) (q/qastro)^3 eV.    (82)

Thus, also the scalar-field mass can change vastly depending on the value of q, reaching a lower limit that can naturally be in the ultralight regime.

Finally, in the deconfining regime there is no minimum fermion mass, so solutions can exist also beyond the range dictated by Eq. (77), but fermion soliton stars in such a regime would be characterized by smaller values of the compactness (see the discussion in Sec. IV).
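The two bounds above can be checked numerically as sketched below (natural units, with the same supplied conversion values as before); the lower bound on µ follows from the definition q^3 = µ φ0^2 in Eq. (73) evaluated at the largest allowed φ0.

    import math

    # Numerical values of the bounds in Eqs. (81)-(82) at q = q_astro = 5e5 GeV.
    m_p = 1.2209e19                      # Planck mass, GeV
    q = 5.0e5                            # GeV

    phi0_max = 0.5 * m_p / math.sqrt(8 * math.pi)
    print(phi0_max)                      # ~ 1.2e18 GeV, i.e. ~ 1e18 GeV as in Eq. (81)

    # From q^3 = mu * phi0^2, the largest allowed phi0 gives the smallest mu.
    mu_min_GeV = q**3 / phi0_max**2
    print(mu_min_GeV * 1e9)              # ~ 8.4e-11 eV, matching Eq. (82)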
VI. CONCLUSIONS

We have found that fermion soliton stars exist as static solutions to Einstein-Klein-Gordon theory with a scalar potential and a Yukawa coupling to a fermion field. This confirms the results of Ref. [13], obtained in the thin-wall approximation, and provides a way to circumvent the no-go theorems [10, 11] for solitons obtained with a single real scalar field.

Focusing on spherical symmetry, we have explored the full parameter space of the model and derived both analytical and numerical scalings for some of the relevant quantities, such as the critical mass and radius of a fermion soliton star. Interestingly, the model predicts the existence of solutions in the subsolar/solar (resp. supermassive) range for a standard gas of degenerate neutrons (resp. electrons). We also unveiled the existence of a confining and a deconfining regime (where the macroscopic properties of the soliton are mostly governed by the scalar-field parameters or by the fermion mass, respectively) and the fact that no Newtonian analog exists for these solutions for fermion masses below a certain threshold.
Extensions of our work are manifold. First of all, for simplicity, we have focused on a scalar-fermion coupling tuned to provide an almost vanishing effective fermion mass in the stellar core. This assumption imposes f = m_f/φ0, a condition that can be relaxed, thus increasing the dimensionality of the parameter space. We have also considered a scalar potential with two degenerate minima. A straightforward generalization is to break this degeneracy and allow for a true false-vacuum potential, in which the scalar field transits from the false-vacuum state inside the star to the true-vacuum state at infinity. From the point of view of the fundamental theory, it would be interesting to investigate an embedding within the Standard Model and beyond, also including gauge fields (e.g., see Ref. [25] for a recent attempt along this direction). Finally, although we focused on static and spherically symmetric solutions, there is no fundamental obstacle to considering spinning configurations and the dynamical regime, both of which would be relevant to study the phenomenology of fermion soliton stars, along the lines of what has been widely studied for boson stars [9] and for mixed fermion-boson stars [26]. In particular, due to the existence of multiple branches [22] and the absence of a Newtonian limit in certain cases, an interesting study concerns the radial linear stability of these solutions. We hope to address these points in future work.

ACKNOWLEDGMENTS

We thank Enrico Barausse, Mateja Bošković, and Massimo Vaglio for useful conversations. G.F. and P.P. acknowledge financial support provided under the European Union's H2020 ERC Starting Grant agreement no. DarkGRA–757480 and under the MIUR PRIN programme, and support from the Amaldi Research Center funded by the MIUR program "Dipartimento di Eccellenza" (CUP: B81I18001170001). The research of A.U. was supported in part by the MIUR under contract 2017 FMJFMW ("New Avenues in Strong Dynamics," PRIN 2017). This work was supported by the EU Horizon 2020 Research and Innovation Programme under the Marie Sklodowska-Curie Grant Agreement No. 101007855.
Appendix A: Connection with scalar-tensor theories

In this appendix we discuss whether the model for fermion soliton stars presented in the main text can also arise in the context of a scalar-tensor theory of gravity (see, e.g., [27] for a review on modified theories of gravity). In the so-called Jordan frame (see footnote 7), where gravity is minimally coupled to matter fields, scalar-tensor theories are described by the action (see for example [28])

Ŝ = ∫ d^4x (√(−ĝ)/(16πG)) [ F(φ̂) R̂ − Z(φ̂) ĝ^{µν} ∂_µφ̂ ∂_νφ̂ − Û(φ̂) ] + Ŝ_m(ψ̂_m; ĝ_{µν}).    (A1)

The coupling functions F and Z single out a particular theory within the class. For example, Brans-Dicke theory corresponds to F = φ̂ and Z = ω0/φ̂, where ω0 is a constant.

[Footnote 7: In this appendix we use a hat to denote quantities in the Jordan frame, whereas quantities without the hat refer to the Einstein frame, where gravity is minimally coupled to the scalar field.]

We can write the theory in an equivalent form in the so-called Einstein frame, where gravity is minimally coupled to the scalar field. For this purpose, we perform a conformal transformation of the metric, ĝ_{µν} = A^2(φ) g_{µν} with A(φ) = F^(−1/2)(φ̂), a field redefinition, φ = φ(φ̂), and a conformal rescaling of the matter field, ψ̂_m → ψ_m. The scalar field φ is now minimally coupled to g_{µν}, whereas ψ_m is minimally coupled to ĝ_{µν} [28]. The energy-momentum tensor is T_{µν} = A^2(φ) T̂_{µν}, whereas the scalar potential becomes U(φ) = Û(φ̂)/(16πG F^2(φ̂)). The scalar field equation in the Einstein frame reads

□φ = −T d log A(φ)/dφ + ∂U/∂φ.    (A2)
Since in our theory (1) the scalar field is minimally coupled to gravity, it is natural to interpret it in the context of the Einstein frame. Thus, we can compare Eq. (A2) to the second equation in (11),

□φ = −f S + ∂U/∂φ,    (A3)

which, using Eq. (8), can be written as

□φ = f T/(m_f − fφ) + ∂U/∂φ.    (A4)

Therefore, if we identify

d log A(φ)/dφ = −f/(m_f − fφ) = 1/(φ − φ0),    (A5)

the scalar equation of our model is the same as in a scalar-tensor theory with coupling A(φ) in the Einstein frame. Integrating this equation yields (henceforth assuming A(0) = 1)

A(φ) = 1 − φ/φ0 = m_eff/m_f.    (A6)

Interestingly, the matter coupling vanishes when φ ≈ φ0.
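The identification in Eqs. (A5)-(A6) is a one-line computation, but it is easy to verify symbolically. The sketch below (using sympy, with the coupling f = m_f/φ0 assumed as in the main text) checks that −f/(m_f − fφ) reduces to 1/(φ − φ0) and that A(φ) = 1 − φ/φ0 solves Eq. (A5) with A(0) = 1.

    import sympy as sp

    phi, phi0, mf = sp.symbols('phi phi0 m_f', positive=True)
    f = mf / phi0                      # coupling tuned as in the main text

    lhs = -f / (mf - f * phi)          # middle expression of Eq. (A5)
    rhs = 1 / (phi - phi0)
    print(sp.simplify(lhs - rhs))      # 0: the two expressions agree

    A = 1 - phi / phi0                 # candidate solution, Eq. (A6)
    print(sp.simplify(sp.diff(sp.log(A), phi) - rhs))   # 0: A(phi) solves (A5)
    print(A.subs(phi, 0))              # 1: normalisation A(0) = 1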
It is left to be checked whether the gravitational sector of our model is equivalent to that of a scalar-tensor theory with A(φ) given by Eq. (A6). Let us consider a degenerate gas of noninteracting fermions with mass m_f in the Jordan frame, with energy-momentum tensor

T̂^{µν} = (Ŵ + P̂) û^µ û^ν + ĝ^{µν} P̂,    (A7)

where, assuming spherical symmetry,

Ŵ(ρ̂) = [2/(2π)^3] ∫_0^{k̂_F(ρ̂)} d^3k √(k^2 + m_f^2),
P̂(ρ̂) = [2/(2π)^3] ∫_0^{k̂_F(ρ̂)} d^3k k^2/(3√(k^2 + m_f^2)).    (A8)

In spherical symmetry, since the spacetime has the same form as in Eq. (13), it is straightforward to minimize the energy of the fermion gas at fixed number of fermions (the calculation is exactly the same as the one done to obtain Eq. (18)):

k̂_F^2 = ω̂_F^2 e^(−2û) − m_f^2.    (A9)

It is important to notice that in the standard scalar-tensor theory in the Jordan frame there is no Yukawa interaction, therefore the fermion particles do not acquire any effective mass.

In the Einstein frame, Eq. (A7) simply reads

T^{µν} = (W + P) u^µ u^ν + g^{µν} P,    (A10)

where W = A^4(φ) Ŵ and P = A^4(φ) P̂. Therefore, also in the Einstein frame we have a perfect fluid in the form of a zero-temperature Fermi gas. Let us now compute the expressions of W and P explicitly. First of all, from Eq. (A8), following the same computation presented in the main text, we get

Ŵ = [m_f^4/(8π^2)] [ x̂ √(1 + x̂^2) (1 + 2x̂^2) − log(x̂ + √(x̂^2 + 1)) ],
P̂ = [m_f^4/(8π^2)] [ x̂ (2x̂^2/3 − 1) √(1 + x̂^2) + log(x̂ + √(x̂^2 + 1)) ],    (A11)

where x̂ = k̂_F/m_f. Since A(φ) = m_eff/m_f, we obtain

W = [m_eff^4/(8π^2)] [ x̂ √(1 + x̂^2) (1 + 2x̂^2) − log(x̂ + √(x̂^2 + 1)) ],
P = [m_eff^4/(8π^2)] [ x̂ (2x̂^2/3 − 1) √(1 + x̂^2) + log(x̂ + √(x̂^2 + 1)) ].    (A12)

Note that W(x̂) and P(x̂) above implicitly define an equation of state that is exactly equivalent to that obtained from W and P in Eqs. (21a)-(21b). This shows that our model can be interpreted as a scalar-tensor theory in the Einstein frame with coupling to matter given by A(φ) = m_eff/m_f (see footnote 8). Furthermore, note that the dimensionless quantity x̂ = k̂_F/m_f = k_F/m_eff = x is invariant under a change from the Jordan to the Einstein frame. Therefore, W and P are exactly those given in Eqs. (21a)-(21b). Finally, Ŝ in the Jordan frame reads

Ŝ = [2/(2π)^3] ∫_0^{k̂_F} d^3k m_f/√(k^2 + m_f^2) = [m_f^3/(2π^2)] [ x̂ √(1 + x̂^2) − log(x̂ + √(x̂^2 + 1)) ],    (A13)

while in the Einstein frame (see footnote 9)

S = A^3 Ŝ = [m_eff^3/(2π^2)] [ x √(1 + x^2) − log(x + √(x^2 + 1)) ],    (A14)

since x̂ = x. Thus, also in this case we obtain the same expression as in Eq. (21c).

[Footnote 8: Note that our model and the scalar-tensor theory are not exactly equivalent to each other. Indeed, while in the scalar-tensor theory any matter field is universally coupled to A(φ)ĝ_{µν}, in our model this is the case only for the fermion gas, while any other matter field is minimally coupled to the metric, in agreement with the fact that our model is based on standard Einstein gravity.]

[Footnote 9: The fact that S = A^3 Ŝ can be derived from the condition A^4(φ) T̂ = T, which implies A^4(φ) m_f Ŝ = m_eff S.]
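Since Eqs. (A11)-(A14) are the standard closed forms for a zero-temperature Fermi gas, they are easy to validate numerically. The sketch below implements W, P, and S as functions of x = k_F/m (in units of the relevant mass to the fourth, or third, power) and checks them against direct numerical integration of the defining integrals in Eqs. (A8) and (A13); it is an independent cross-check, not code from the paper.

    import math
    from scipy.integrate import quad

    # Closed forms of Eqs. (A11)/(A13) in units of m^4 (W, P) and m^3 (S), x = k_F/m.
    def W_closed(x):
        return (x * math.sqrt(1 + x**2) * (1 + 2 * x**2) - math.asinh(x)) / (8 * math.pi**2)

    def P_closed(x):
        return (x * (2 * x**2 / 3 - 1) * math.sqrt(1 + x**2) + math.asinh(x)) / (8 * math.pi**2)

    def S_closed(x):
        return (x * math.sqrt(1 + x**2) - math.asinh(x)) / (2 * math.pi**2)

    # Direct integration: 2/(2*pi)^3 * 4*pi * int_0^x dk k^2 (...) = (1/pi^2) * int_0^x ...
    def W_num(x):
        return quad(lambda k: k**2 * math.sqrt(k**2 + 1), 0, x)[0] / math.pi**2

    def P_num(x):
        return quad(lambda k: k**4 / (3 * math.sqrt(k**2 + 1)), 0, x)[0] / math.pi**2

    def S_num(x):
        return quad(lambda k: k**2 / math.sqrt(k**2 + 1), 0, x)[0] / math.pi**2

    for x in (0.1, 1.0, 10.0):
        # differences are at the level of numerical round-off
        print(x, W_closed(x) - W_num(x), P_closed(x) - P_num(x), S_closed(x) - S_num(x))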
Having assessed that our model can be interpreted in the context of a scalar-tensor theory, it is interesting to study the latter in the Jordan frame. In particular, since

A(φ) = 1/√(F(φ̂)),    (A15)

and A(φ) = 1 − φ/φ0, the coupling function F(φ̂) is singular at φ̂(φ0). In the language of the scalar-tensor theory, we see that in the core of a fermion soliton star, where φ ≈ φ0 and matter is almost decoupled in the Einstein frame, the scalar field in the Jordan frame is strongly coupled to gravity.
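This singular behaviour of the Jordan-frame coupling is immediate from Eqs. (A6) and (A15): F = 1/A^2 = 1/(1 − φ/φ0)^2, which diverges as φ → φ0. A short symbolic check (again with sympy, purely as an illustration) is given below.

    import sympy as sp

    phi, phi0 = sp.symbols('phi phi0', positive=True)
    A = 1 - phi / phi0                 # Eq. (A6)
    F = 1 / A**2                       # Eq. (A15) inverted: A = 1/sqrt(F)

    print(sp.limit(F, phi, phi0))      # oo: the coupling blows up at phi = phi0
    print(F.subs(phi, 0))              # 1: at phi = 0 one recovers A = F = 1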
[1] J. A. Wheeler, Phys. Rev. 97, 511 (1955).
[2] C. A. R. Herdeiro and E. Radu, Int. J. Mod. Phys. D 24, 1542014 (2015), arXiv:1504.08209 [gr-qc].
[3] D. J. Kaup, Phys. Rev. 172, 1331 (1968).
[4] R. Ruffini and S. Bonazzola, Phys. Rev. 187, 1767 (1969).
[5] M. Colpi, S. L. Shapiro, and I. Wasserman, Phys. Rev. Lett. 57, 2485 (1986).
[6] S. R. Coleman, Nucl. Phys. B 262, 263 (1985), [Addendum: Nucl. Phys. B 269, 744 (1986)].
[7] P. Jetzer, Phys. Rept. 220, 163 (1992).
[8] F. E. Schunck and E. W. Mielke, Class. Quant. Grav. 20, R301 (2003), arXiv:0801.0307 [astro-ph].
[9] S. L. Liebling and C. Palenzuela, Living Rev. Rel. 15, 6 (2012), arXiv:1202.5809 [gr-qc].
[10] G. H. Derrick, J. Math. Phys. 5, 1252 (1964).
[11] C. A. R. Herdeiro and J. M. S. Oliveira, Class. Quant. Grav. 36, 105015 (2019), arXiv:1902.07721 [gr-qc].
[12] E. Seidel and W. M. Suen, Phys. Rev. Lett. 66, 1659 (1991).
[13] T. D. Lee and Y. Pang, Phys. Rev. D 35, 3678 (1987).
[14] V. Cardoso and P. Pani, Living Rev. Rel. 22, 4 (2019), arXiv:1904.05363 [gr-qc].
[15] R. Friedberg, T. D. Lee, and Y. Pang, Phys. Rev. D 35, 3658 (1987).
[16] C. Palenzuela, P. Pani, M. Bezares, V. Cardoso, L. Lehner, and S. Liebling, Phys. Rev. D 96, 104058 (2017), arXiv:1710.09432 [gr-qc].
[17] M. Bezares, M. Bošković, S. Liebling, C. Palenzuela, P. Pani, and E. Barausse, Phys. Rev. D 105, 064067 (2022), arXiv:2201.06113 [gr-qc].
[18] M. Bošković and E. Barausse, JCAP 02, 032 (2022), arXiv:2111.03870 [gr-qc].
[19] S. L. Shapiro and S. A. Teukolsky, Black holes, white dwarfs, and neutron stars: The physics of compact objects (1983).
[20] T. Lee and Y. Pang, Physics Reports 221, 251 (1992).
[21] H. A. Buchdahl, Phys. Rev. 116, 1027 (1959).
[22] D. Guerra, C. F. B. Macedo, and P. Pani, JCAP 09, 061 (2019), [Erratum: JCAP 06, E01 (2020)], arXiv:1909.05515 [gr-qc].
[23] C. Alcock, E. Farhi, and A. Olinto, Astrophys.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' 310, 261 (1986).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' [24] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Urbano and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Veermäe, JCAP 04, 011 (2019), arXiv:1810.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content='07137 [gr-qc].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' [25] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Endo, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Ishihara, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Ogawa, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' D 105, 104041 (2022), arXiv:2203.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content='09709 [hep-th].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' [26] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Valdez-Alvarado, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Palenzuela, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Alic, and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Ureña López, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' D 87, 084040 (2013), arXiv:1210.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content='2299 [gr-qc].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/SNFAT4oBgHgl3EQf1h4R/content/2301.08709v1.pdf'} +page_content=' [27] E.' 
diff --git a/TNE5T4oBgHgl3EQfAQ5w/vector_store/index.pkl b/TNE5T4oBgHgl3EQfAQ5w/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..e66d04042a1e7ba2f1e12114a17166f18ecda652
--- /dev/null
+++ b/TNE5T4oBgHgl3EQfAQ5w/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b4db2e36c373c0f1b49b970f7a389c3d0ed98e7a64fa5f3f87204a9e5569cc2
+size 89378
diff --git a/TdFKT4oBgHgl3EQfki6p/content/tmp_files/2301.11850v1.pdf.txt b/TdFKT4oBgHgl3EQfki6p/content/tmp_files/2301.11850v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..08442f2f80c6cd05b0ade0b8d5145d12fa884e18
--- /dev/null
+++ b/TdFKT4oBgHgl3EQfki6p/content/tmp_files/2301.11850v1.pdf.txt
@@ -0,0 +1,869 @@
Factual or Biased? Predicting Sentence-Level Factuality and Bias of News
Francielle Vargas1,2, Fabiana Góes1, Thiago Pardo1, Fabrício Benevenuto2
1Institute of Mathematical and Computer Sciences, University of São Paulo, Brazil
2Computer Science Department, Federal University of Minas Gerais, Brazil
francielleavargas@usp.br, fabianagoes@usp.br, taspardo@icmc.usp.br, fabricio@dcc.ufmg.br

Abstract

We present a study on sentence-level factuality and bias of news articles across domains.
While prior work in NLP has mainly focused on predicting the factuality of article-level news reporting and the political-ideological bias of news media, we investigate the effects of framing bias in factual reporting across domains so as to predict factuality and bias at the sentence level, which may explain more accurately the overall reliability of the entire document. First, we manually produced a large sentence-level annotated dataset, titled FactNews, composed of 6,191 sentences from 100 news stories covered by three different outlets, resulting in 300 news articles. Further, we studied how biased and factual spans surface in news articles from different media outlets and different domains. Lastly, a baseline model for factual sentence prediction was presented by fine-tuning BERT. We also provide a detailed analysis of the data demonstrating the reliability of the annotation and models.

1 Introduction

While journalism is tied to a set of ethical standards and values, including truth, fairness and impartiality, it often strays from objective facts. As a result, biased news is produced (Mastrine, 2022), with relevant potential to influence the public's perception (Hamborg, 2020). Bias, according to Recasens et al. (2013), is linked to lexical and grammatical cues identified by the literature on subjectivity. Framing bias (Recasens et al., 2013; Entman, 2007) consists of subjective words or phrases linked to a particular point of view. In contrast, factuality is linked to impartiality, identified by the literature on objectivity. Most researchers address media bias and factuality either at the level of the media outlet (Baly et al., 2018) or at the level of the individual article (Roy and Goldwasser, 2020; Baly et al., 2020). Nevertheless, each article itself comprises multiple sentences, which vary in their embedded bias (Lim et al., 2020), as shown in the example in Table 1.

N.    | Sentence-level news article                                                       | Label(s)
Title | Food inflation has shown greater resistance than that of so-called durable       | biased
S1    | Brazil had deflation for the third consecutive month                              | factual
S2    | In September, prices measured by the IPCA fell, on average, 0.29%                 | factual
S3    | In the last 12 months, the IPCA recorded inflation of 7.17%                       | factual
S4    | Food inflation has shown more resistance than that of so-called durable products  | factual
S5    | Food inflation still distorts our perception of prices, and the economy           | biased
S6    | "Today we are trying to survive.", said a customer at the supermarket.            | quotes
Table 1: Sentence-Level Factuality and Bias.

As shown in Table 1, biased sentences present subjectivity markers (e.g., "greater" in the title) or the point of view of a journalist (e.g., S5), and may influence readers' perception. There are also direct quotes, which are neither biased nor factual sentences. Therefore, news media sources can sway public opinion, whether through practical limits on neutrality and objectivity or through deliberate attempts to go against or in favor of something or someone.

Bias can be broadly categorized into two classes: framing and epistemological (Recasens et al., 2013). In general, framing bias is more explicit than epistemological bias (Bordia and Bowman, 2019). Framing bias occurs when subjective or opinion-based words are used.
For instance, “Ter- +rorists are horrible and prejudiced people", the +words “horrible” and ‘’prejudiced” show an evalu- +ation from the writer’s point of view. On the other +hand, epistemological biases are entailed, asserted, +or hedged in the text. For example, in the sen- +tence “Kuypers claimed that the mainstream press +in America tends to favor liberal viewpoints,” the +word claimed has a doubtful effect on Kuypers’s +1 +arXiv:2301.11850v1 [cs.CL] 27 Jan 2023 + +statement as opposed to stated in the sentence — +“Kuypers stated that the mainstream press in Amer- +ica tends to favor liberal viewpoints” (Bordia and +Bowman, 2019). +To fill this important research gap and mitigate +this indisputably relevant social problem, we ad- +dressed both biased and factual sentences predic- +tion by using a strategy that has proved to be +effective: we created a new dataset titled Fact- +News composed of 6,191 sentences from 300 news +documents. The same news report was extracted +from three different journalistic vehicles from dif- +ferent domains. Furthermore, each sentence of +the dataset was annotated according to three dif- +ferent classes: (a) factual spans, which consists +of sentences presented with impartiality focused +on objective facts; (b) biased spans, consisting +of subjective sentences that stray from the objec- +tive and break the commitment to impartiality, and +for biased spans annotations were done according +to 16 types of media bias proposed by AllSides +(Mastrine, 2022); additionally, (c) quotes, direct +statements often followed by quotation marks that +journalist in general uses to report the speech of +someone involved in the reported event. Then, +we trained two different models using fine-tuned +BERT. The first model predicts whether the sen- +tence is factual or not. The second model predicts +whether the sentence is biased or not, which ad- +vancing in the literature, present results for differ- +ent domains besides political domain. As a result, +a baseline model for sentence-level factuality pre- +diction was proposed, and a sentence-level media +bias model across domains. The contributions of +this study are summarized as follows: +• We focused on an under-explored but surely +relevant problem: predicting factuality of +news. We further study sentence-level media +bias, which is definitely under-explored. +• We created a large and manually-annotated +dataset of news articles at the sentence-level +for both tasks. The dataset and code are avail- +able, which may facilitate future research. +• We presented a baseline model for sentence- +level factuality prediction. +• We provided data analysis on factual and bi- +ased sentences demonstrating the reliability +of the annotation schema and models. +2 +Related Work +Article-level media bias consists in predicting +whether an entire news report is biased. Most of +the proposals have contemplated text-based meth- +ods in order to measure news ideology, similar to +those proposed by Sapiro-Gheiler (2019). In the +same settings, Iyyer et al. (2014) predicted po- +litical ideology using recursive neural networks. +Baly et al. (2019) proposed a multi-task regression +framework aiming to predict the trustworthiness +and ideology of news media. Liu et al. (2022) +applied the pre-trained language model for the po- +litical domain so as to characterize political stance. +Baly et al. (2020) created a model from learning +media sources such as a shortcut for predicting ide- +ology using adversarial networks. 
In this paper, we focus on sentence-level media bias across domains, besides providing baselines for sentence-level factuality prediction.

Sentence-level media bias is the task of predicting whether each sentence of a news report is biased or not. It is an under-explored issue in the area. Fan et al. (2019) provided the first sentence-level annotated dataset, titled BASIL, composed of 300 news articles annotated with 1,727 biased spans and 6,257 non-biased sentences, together with fine-tuned BERT baseline experiments reaching an F1-score of 47.27%. Lim et al. (2020) created a new dataset, titled biased-sents, composed of 966 sentences from 46 English-language news articles covering 4 different events. Spinde et al. (2021) provided an expert-annotation project through a new dataset titled BABE, which consists of 3,700 sentences balanced among topics and outlets, and a new fine-tuned BERT baseline reaching an F1-score of 80.04%. Finally, Lei et al. (2022) showed that embedding discourse structure into sentence-level media bias models effectively increases the recall of biased sentence identification by 8.27%-8.62% and precision by 2.82%-3.48%.

Factuality is the task of predicting whether a news report is factual or not, and it remains under-explored in the literature. Baly et al. (2018) provided the first study on predicting the article-level factuality of reporting and the bias of news media, characterizing entire news media outlets. In this paper, we propose the sentence-level factuality task, which consists of predicting whether a sentence in a news article is factual or not. We further provide a study on sentence-level factuality and bias in news articles.

3 FactNews Dataset

We collected, annotated, and hence propose the dataset titled FactNews. It is a sentence-level annotated dataset that contains 6,191 annotated sentences: 4,302 sentences are factual spans, 1,389 sentences are quotes, and 558 sentences are biased spans. A dataset overview is shown in Table 2.

Data Collection: FactNews was collected from 100 news stories in triples, i.e., the same story from 3 different Brazilian news outlets (Folha de São Paulo, https://www.folha.uol.com.br/; O Globo, https://oglobo.globo.com/; and Estadão, https://www.estadao.com.br/), resulting in 300 documents. Using a statistical approach and a search algorithm, we collected news articles related to 6 different domains (politics, world, daily life, sports, science, and culture) from the periods 2006-2007 and 2021-2022. In accordance with the relevant literature of the area, we selected three news articles from different news outlets about the same topic or story.

Data Annotation: In line with our objective of classifying factuality and bias at the sentence level, we segmented each news article into sentences and annotated them according to three different classes: (a) factual spans, (b) biased spans, and (c) quotes. To classify biased spans, we proposed an annotation schema based on the 16 types of media bias proposed by AllSides (Mastrine, 2022) (e.g., sensationalism, slant, opinion statements, spin). Two annotators from different regions (southeast and northeast) performed the task, a linguist and a computer scientist, both with at least a Ph.D. degree or Ph.D. candidate status.

Data Evaluation: We computed inter-annotator agreement using a reliable metric from the literature, Cohen's kappa, and obtained a Cohen's kappa of 94.42%. The annotation process was performed by two different annotators, and disagreement cases were judged by two different judges. In addition, two rounds of review were carried out, in which annotators could discuss doubts and re-evaluate the given labels. The agreement/disagreement data is available at (BLIND).
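As a quick illustration of how an agreement figure of this kind can be reproduced from two annotators' label sequences (this is not the authors' code, and the short label lists below are hypothetical placeholders rather than actual FactNews annotations), Cohen's kappa can be computed with scikit-learn:

from sklearn.metrics import cohen_kappa_score

# Hypothetical toy labels from two annotators over the same six sentences;
# FactNews uses the classes "factual", "biased", and "quotes".
annotator_a = ["factual", "factual", "biased", "quotes", "factual", "biased"]
annotator_b = ["factual", "factual", "biased", "quotes", "factual", "factual"]

kappa = cohen_kappa_score(annotator_a, annotator_b)
print(f"Cohen's kappa: {kappa:.4f}")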
The annotation process was performed by +two different annotators and disagreement cases +were judged by two different judges. +In addi- +tion, two different rounds of reviews were car- +ried out, in which annotators could discuss doubts +and re-evaluate the given labels. +The agree- +ments/disagreements data is available at (BLIND). +Data Analysis: Table 2 shows a summary of +FactNews dataset statistics. Most of the sentences +(68.51%) are factual spans. In contrast, the quotes +and biased categories compose 22.52% and 8.81% +of all labels, respectively. On average, each news +1https://www.folha.uol.com.br/ +2https://oglobo.globo.com/ +3https://www.estadao.com.br/ +article is composed of 14.14 factual sentences, 3.27 +biased sentences, and 7.06 quote sentences, being +20.36 words in factual sentences, 22.14 words in +biased sentences, and 17.38 words in quotes. In +general, biased spans present more words than fac- +tual spans at all grammar categories. Furthermore, +emotions are more predominant in biased spans +than factual spans. Lastly, the titles of news articles +hold 8.36% of bias, 5.33% of quotes, and 86% of +factuality. On the other hand, the body of news ar- +ticles holds 13.35% of bias, 20.38% of quotes and +66.27% of factuality. Figure 1 shows the distribu- +tion of factual and biased sentences across domains +according to each media news outlets. Notably, the +distribution of factuality is equivalent across differ- +ent domains . Differently, the distribution of bias +varies in accordance with the domain and media +outlets. +Figure 1: +Distribution of factual and biased spans +across domains from different media news sources. +4 +Baseline Experiments +We introduce a strong baseline sentence-level factu- +ality prediction model for FactNews. We study the +factuality prediction problem as a binary classifica- +tion task (i.e., whether a sentence is factual or not), +as shown in Figure 2. We further train a model +for sentence-level media bias task (i.e., whether a +sentence is biased or not), which advancing in the +literature, rather than focusing on political domain, +we presents results for different domains besides +the political. Both results are shown in Table 3. +Figure 2: Sentence-level factuality prediction. +Note that we assume that factual consists of in- +formation that deals with facts. In other words, it is +a type of information presented with impartiality fo- +cused on objective facts, in contrast to non-factual +that break the commitment to impartiality. 
[Figure 1 plots (referenced above): bar charts of factual and biased sentence counts per domain (politics, world, daily life, sports, culture, science) for Folha de São Paulo, Estadão, and O Globo. Figure 2 diagram: sentences mapped to the class variable Factual vs. No-Factual (biased spans + quotes).]

Table 2: Dataset Statistics. Each outlet column reports factual / quotes / biased values.

Metric                | Folha de São Paulo     | Estadão                | O Globo                | All
#Articles             | 100                    | 100                    | 100                    | 300
#Sentences            | 1,494 / 450 / 231      | 1,428 / 483 / 182      | 1,320 / 458 / 145      | 6,191
#Words                | 30,374 / 7,946 / 5,177 | 30,589 / 8,504 / 4,002 | 25,505 / 7,740 / 3,195 | 123,032
Avg sentences/article | 14.94 / 7.03 / 3.78    | 14.28 / 7.00 / 3.19    | 13.20 / 7.15 / 2.84    | 8.15
Avg words/sentence    | 20.33 / 17.65 / 22.41  | 21.45 / 17.60 / 21.98  | 19.32 / 16.89 / 22.03  | 19.96
Body                  | 1,337 / 440 / 207      | 1,218 / 473 / 162      | 1,089 / 441 / 131      | 5,498
Title                 | 157 / 10 / 24          | 210 / 10 / 20          | 231 / 17 / 14          | 693
Domains
  Political           | 912 / 340 / 130        | 870 / 352 / 106        | 748 / 351 / 64         | 3,873
  World               | 224 / 48 / 31          | 224 / 49 / 27          | 216 / 32 / 29          | 880
  Sports              | 100 / 23 / 34          | 124 / 25 / 29          | 98 / 18 / 39           | 490
  Daily Life          | 132 / 11 / 2           | 98 / 7 / 4             | 148 / 7 / 4            | 413
  Culture             | 98 / 26 / 32           | 72 / 42 / 15           | 77 / 45 / 5            | 412
  Science             | 28 / 2 / 2             | 40 / 8 / 1             | 33 / 5 / 4             | 123
Part-of-speech (avg)
  Noun                | 4.85 / 4.09 / 5.72     | 5.21 / 4.12 / 5.60     | 4.59 / 3.82 / 5.19     | 4.79
  Verb                | 2.20 / 2.55 / 2.60     | 2.28 / 2.51 / 2.53     | 2.00 / 2.44 / 2.57     | 4.18
  Adjective           | 1.03 / 1.03 / 1.32     | 1.11 / 1.08 / 1.32     | 0.94 / 0.97 / 1.48     | 1.14
  Adverb              | 0.67 / 0.82 / 0.93     | 0.67 / 0.94 / 0.90     | 0.59 / 0.90 / 0.94     | 0.81
  Pronoun             | 0.52 / 1.02 / 0.73     | 0.51 / 0.97 / 0.56     | 0.47 / 0.90 / 0.59     | 0.69
  Conjunction         | 0.51 / 0.55 / 0.61     | 0.54 / 0.57 / 0.73     | 0.51 / 0.88 / 0.70     | 0.62
Emotion (avg)
  Happiness           | 0.12 / 0.22 / 0.20     | 0.16 / 0.28 / 0.26     | 0.13 / 0.28 / 0.22     | 0.20
  Disgust             | 0.03 / 0.06 / 0.05     | 0.04 / 0.06 / 0.03     | 0.04 / 0.04 / 0.04     | 0.04
  Fear                | 4.18 / 3.80 / 4.63     | 4.41 / 3.77 / 4.56     | 4.05 / 3.60 / 4.50     | 4.16
  Anger               | 0.05 / 0.06 / 0.13     | 0.07 / 0.07 / 0.12     | 0.06 / 0.08 / 0.20     | 0.09
  Surprise            | 0.01 / 0.03 / 0.03     | 0.01 / 0.03 / 0.05     | 0.01 / 0.02 / 0.01     | 0.02
  Sadness             | 5.86 / 5.71 / 6.52     | 6.17 / 5.55 / 6.48     | 5.56 / 5.40 / 6.19     | 5.93
Polarity (avg)
  Positive            | 2.41 / 3.25 / 2.93     | 2.55 / 3.22 / 2.95     | 2.26 / 3.26 / 2.96     | 2.86
  Negative            | 0.05 / 0.06 / 0.05     | 0.07 / 0.10 / 0.09     | 0.06 / 0.07 / 0.06     | 0.06
  Neutral             | 9.55 / 9.77 / 10.93    | 9.92 / 9.52 / 11.03    | 8.91 / 9.28 / 10.56    | 9.94

Model Architecture and Settings: In data preparation, we only removed special characters. As the learning method, we used an SVM with a linear kernel and balanced the classes using undersampling. We split the data into train (90%) and validation (10%) sets and applied 10-fold cross-validation. We implemented a set of experiments using 4 different model architectures: (i) BERT fine-tuning (best model), with batch size 64, a maximum of 500 features, learning rate 2e-05, and 4 epochs; (ii) subjective lexicons, with features based on sentiment and emotion lexicons (Pasqualotti, 2008), which provide semantic polarity and emotion types; (iii) part-of-speech, with features based on part-of-speech tags (noun, verb, adjective, adverb, pronoun, and conjunction) obtained with the spaCy POS tagger; and (iv) TF-IDF, a baseline vector space model using term frequency-inverse document frequency.
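As a rough sketch of how the feature-based settings above could be wired together (this is an assumed reconstruction, not the authors' implementation; load_factnews_sentences is a hypothetical loader returning sentence strings and binary labels, and the 500-dimension cap is borrowed loosely from the feature setting reported above), a TF-IDF plus linear-SVM baseline with undersampling and 10-fold cross-validation could look as follows; the BERT variant would instead fine-tune a pretrained transformer on the same binary targets.

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC

# Hypothetical loader: parallel arrays of sentences and binary labels (1 = factual, 0 = not).
texts, labels = load_factnews_sentences()  # assumed helper, not defined here
texts, labels = np.asarray(texts, dtype=object), np.asarray(labels)

# Random undersampling of the majority class to balance the two classes.
rng = np.random.default_rng(42)
idx_pos = np.flatnonzero(labels == 1)
idx_neg = np.flatnonzero(labels == 0)
n = min(len(idx_pos), len(idx_neg))
keep = np.concatenate([rng.choice(idx_pos, n, replace=False),
                       rng.choice(idx_neg, n, replace=False)])

# TF-IDF features (capped at 500 dimensions) feeding an SVM with a linear kernel.
model = make_pipeline(TfidfVectorizer(max_features=500), LinearSVC())

# 10-fold cross-validation, reporting macro-averaged F1.
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
scores = cross_val_score(model, texts[keep], labels[keep], cv=cv, scoring="f1_macro")
print(f"Macro F1: {scores.mean():.2f} (+/- {scores.std():.2f})")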
For sentence-level media bias in dif- +ferent domains besides political domain, the best +model obtained 67% of F1. Notably, the part-of- +speech model also presented relevant results for +both tasks in contrast to the subjective-lexicons. +Sentence-Level Factuality +Precision +Recall +F1-Score +BERT fine-tuning +0.87 +0.87 +0.87 +Part-of-speech +0.77 +0.77 +0.76 +TF-IDF +0.81 +0.69 +0.66 +Polarity-lexicon +0.63 +0.62 +0.62 +Emotion-lexicon +0.61 +0.61 +0.61 +Sentence-Level Media Bias +Precision +Recall +F1-Score +BERT fine-tuning +0.70 +0.68 +0.67 +Part-of-speech +0.67 +0.66 +0.66 +Polarity-lexicon +0.50 +0.50 +0.50 +Emotion-lexicon +0.53 +0.52 +0.50 +TF-IDF +0.78 +0.58 +0.48 +Table 3: The table shows our baseline for the sentence- +level factuality prediction, as well as results for the +sentence-level media bias prediction across domains. +5 +Conclusions +We present a study on factuality and bias in news +across domains. +We first created a large and +manually-annotated dataset for sentence-level fac- +tuality and bias prediction. We provided a detailed +analysis of data demonstrating the reliability of the +annotation and models. We also built a strong fine- +tuned BERT baseline for fine-grained factuality +prediction, and a sentence-level media bias model +across domains. Based on our findings, biased +spans are more numerous in words and emotions +compared to factual spans, and the distribution of +bias in news articles may vary according to domain +and media outlets, in contrast to factual spans. +4 + +Acknowledgements +The authors are grateful to Isabelle Carvalho +for providing valuable judgements on disagree- +ments annotation cases, as well as SINCH, CNPq, +FAPEMIG and FAPESP for partially funding this +project. +References +Ramy Baly, Giovanni Da San Martino, James Glass, +and Preslav Nakov. 2020. We can detect your bias: +Predicting the political ideology of news articles. In +Proceedings of the 2020 Conference on Empirical +Methods in Natural Language Processing), pages +4982–4991, Held Online. +Ramy Baly, Georgi Karadzhov, Dimitar Alexandrov, +James Glass, and Preslav Nakov. 2018. +Predict- +ing factuality of reporting and bias of news media +sources. In Proceedings of the 2018 Conference on +Empirical Methods in Natural Language Processing, +pages 3528–3539, Brussels, Belgium. +Ramy Baly, Georgi Karadzhov, Abdelrhman Saleh, +James Glass, and Preslav Nakov. 2019. Multi-task +ordinal regression for jointly predicting the trustwor- +thiness and the leading political ideology of news +media. In Proceedings of the 2019 Conference of the +North American Chapter of the Association for Com- +putational Linguistics: Human Language Technolo- +gies, pages 2109–2116, Minneapolis, Minnesota. +Shikha Bordia and Samuel R. Bowman. 2019. Identify- +ing and reducing gender bias in word-level language +models. In Proceedings of the 17th Conference of +the North American Chapter of the Association for +Computational Linguistics: Student Research Work- +shop, pages 7–15, Minneapolis, Minnesota. +Robert Entman. 2007. +Framing bias: Media in the +distribution of power. Journal of Communication, +57(1):163–173. +Lisa Fan, Marshall White, Eva Sharma, Ruisi Su, +Prafulla Kumar Choubey, Ruihong Huang, and +Lu Wang. 2019. In plain sight: Media bias through +the lens of factual reporting. In Proceedings of the +2019 Conference on Empirical Methods in Natural +Language Processing and the 9th International Joint +Conference on Natural Language Processing, pages +6343–6349, Hong Kong, China. 
Felix Hamborg. 2020. Media bias, the social sciences, and NLP: Automating frame analyses to identify bias by word choice and labeling. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop, pages 79–87, Held Online.

Mohit Iyyer, Peter Enns, Jordan Boyd-Graber, and Philip Resnik. 2014. Political ideology detection using recursive neural networks. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics, pages 1113–1122, Baltimore, Maryland.

Yuanyuan Lei, Ruihong Huang, Lu Wang, and Nick Beauchamp. 2022. Sentence-level media bias analysis informed by discourse structures. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 10040–10050, Abu Dhabi, United Arab Emirates.

Sora Lim, Adam Jatowt, Michael Färber, and Masatoshi Yoshikawa. 2020. Annotating and analyzing biased sentences in news articles using crowdsourcing. In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 1478–1484, Marseille, France.

Yujian Liu, Xinliang Frederick Zhang, David Wegsman, Nicholas Beauchamp, and Lu Wang. 2022. POLITICS: Pretraining with same-story article comparison for ideology prediction and stance detection. In Findings of the Association for Computational Linguistics: 2022 Annual Conference of the North American Chapter of the Association for Computational Linguistics, pages 1354–1374, Seattle, United States.

Julie Mastrine. 2022. How to Spot 16 Types of Media Bias. AllSides: Don't be fooled by media bias & misinformation, California, United States.

P. R. Pasqualotti. 2008. Reconhecimento de expressões de emoções na interação mediada por computador. Master's thesis (Dissertação de Mestrado em Ciência da Computação), Pontifícia Universidade Católica do Rio Grande do Sul (PUCRS), Porto Alegre, Brazil.

Marta Recasens, Cristian Danescu-Niculescu-Mizil, and Dan Jurafsky. 2013. Linguistic models for analyzing and detecting biased language. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 1650–1659, Sofia, Bulgaria.

Shamik Roy and Dan Goldwasser. 2020. Weakly supervised learning of nuanced frames for analyzing polarization in news media. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, pages 7698–7716, Held Online.

Eitan Sapiro-Gheiler. 2019. Examining political trustworthiness through text-based measures of ideology. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 10029–10030, Hawaii, United States.

Timo Spinde, Manuel Plank, Jan-David Krieger, Terry Ruas, Bela Gipp, and Akiko Aizawa. 2021. Neural media bias detection using distant supervision with BABE - bias annotations by experts. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 1166–1177, Punta Cana, Dominican Republic.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Predicting Sentence-Level Factuality and Bias of News Francielle Vargas1,2, Fabiana Góes1, Thiago Pardo1, Fabrício Benevenuto2 1Institute of Mathematical and Computer Sciences, University of São Paulo, Brazil 2Computer Science Department, Federal University of Minas Gerais, Brazil francielleavargas@usp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='br, fabianagoes@usp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='br, taspardo@icmc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='usp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='br, fabricio@dcc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='ufmg.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='br Abstract We present a study on sentence-level factual- ity and bias of news articles across domains.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' While prior work in NLP has mainly focused on predicting the factuality of article-level news reporting and political-ideological bias of news media, we investigated the effects of framing bias in factual reporting across do- mains so as to predict factuality and bias at the sentence level, which may explain more accu- rately the overall reliability of the entire doc- ument.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' First, we manually produced a large sentence-level annotated dataset, titled Fact- News, composed of 6,191 sentences from 100 news stories by three different outlets, result- ing in 300 news articles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Further, we studied how biased and factual spans surface in news articles from different media outlets and differ- ent domains.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Lastly, a baseline model for fac- tual sentence prediction was presented by fine- tuning BERT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' We also provide a detailed anal- ysis of data demonstrating the reliability of the annotation and models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' 1 Introduction While journalism is tied to a set of ethical standards and values, including truth, fairness and impartial- ity, it often strays from objective facts.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' As a result, biased news are produced (Mastrine, 2022) with relevant potential to influence the public’s percep- tion (Hamborg, 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Bias, according to Recasens et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' (2013), is linked to lexical and grammatical cues, identified by the literature on subjectivity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Framing bias (Recasens et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=', 2013;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Entman, 2007) is composed by subjective words or phrases linked to a particular point of view.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' In contrast, factuality is linked to impartiality, identified by the literature on objectivity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Most researchers address media bias and factuality either at the level of media outlet (Baly et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=', 2018) or at the level of individual arti- cle (Roy and Goldwasser, 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Baly et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=', 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Nevertheless, each article itself comprises multiple sentences, which vary in their embedded bias (Lim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=', 2020), as shown in the example in Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Sentence-level news article Label(s) Title Food inflation has shown greater resis- tance than that of so-called durable biased S1 Brazil had deflation for the third consecu- tive month factual S2 In September, prices measured by the IPCA fell, on average, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='29% factual S3 In the last 12 months, the IPCA recorded inflation of 7.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='17% factual S4 Food inflation has shown more resistance than that of so-called durable products factual S5 Food inflation still distorts our perception of prices, and the economy biased S6 "Today we are trying to survive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' ", said a customer at the supermarket.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' quotes Table 1: Sentence-Level Factuality and Bias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' As shown in Table 1, biased sentences present subjectivity markers (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' greater - Title), or the point of view of a journalist (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' S5) and may influence readers’ perception.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' There are also direct quotes, which are neither biased sentences nor fac- tual sentences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Therefore, the news media sources surely affect the power of swaying public opinion through the practical limitation to neutrality and ob- jectivity, or using deliberate attempts to go against or in favor of something or someone.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Bias can be broadly categorized into two classes: framing and epistemological (Recasens et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=', 2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' In general, framing bias is more explicit than epistemological bias (Bordia and Bowman, 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Framing bias occurs when subjective or opinion-based words are used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' For instance, “Ter- rorists are horrible and prejudiced people", the words “horrible” and ‘’prejudiced” show an evalu- ation from the writer’s point of view.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' On the other hand, epistemological biases are entailed, asserted, or hedged in the text.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' For example, in the sen- tence “Kuypers claimed that the mainstream press in America tends to favor liberal viewpoints,” the word claimed has a doubtful effect on Kuypers’s 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='11850v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='CL] 27 Jan 2023 statement as opposed to stated in the sentence — “Kuypers stated that the mainstream press in Amer- ica tends to favor liberal viewpoints” (Bordia and Bowman, 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' To fill this important research gap and mitigate this indisputably relevant social problem, we ad- dressed both biased and factual sentences predic- tion by using a strategy that has proved to be effective: we created a new dataset titled Fact- News composed of 6,191 sentences from 300 news documents.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' The same news report was extracted from three different journalistic vehicles from dif- ferent domains.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Furthermore, each sentence of the dataset was annotated according to three dif- ferent classes: (a) factual spans, which consists of sentences presented with impartiality focused on objective facts;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' (b) biased spans, consisting of subjective sentences that stray from the objec- tive and break the commitment to impartiality, and for biased spans annotations were done according to 16 types of media bias proposed by AllSides (Mastrine, 2022);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' additionally, (c) quotes, direct statements often followed by quotation marks that journalist in general uses to report the speech of someone involved in the reported event.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Then, we trained two different models using fine-tuned BERT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' The first model predicts whether the sen- tence is factual or not.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' The second model predicts whether the sentence is biased or not, which ad- vancing in the literature, present results for differ- ent domains besides political domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' As a result, a baseline model for sentence-level factuality pre- diction was proposed, and a sentence-level media bias model across domains.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' The contributions of this study are summarized as follows: We focused on an under-explored but surely relevant problem: predicting factuality of news.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' We further study sentence-level media bias, which is definitely under-explored.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' We created a large and manually-annotated dataset of news articles at the sentence-level for both tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' The dataset and code are avail- able, which may facilitate future research.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' We presented a baseline model for sentence- level factuality prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' We provided data analysis on factual and bi- ased sentences demonstrating the reliability of the annotation schema and models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' 2 Related Work Article-level media bias consists in predicting whether an entire news report is biased.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Most of the proposals have contemplated text-based meth- ods in order to measure news ideology, similar to those proposed by Sapiro-Gheiler (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' In the same settings, Iyyer et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' (2014) predicted po- litical ideology using recursive neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Baly et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' (2019) proposed a multi-task regression framework aiming to predict the trustworthiness and ideology of news media.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Liu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' (2022) applied the pre-trained language model for the po- litical domain so as to characterize political stance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Baly et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' (2020) created a model from learning media sources such as a shortcut for predicting ide- ology using adversarial networks.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' In this paper, we focus on sentence-level media bias across domains besides providing baselines for sentence-level fac- tuality prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Sentence-level media bias consists of a task aiming to predict whether each sentence of a news report is biased or not.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' It is an under-explored issue in the area.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Fan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' (2019) provided the first sentence-level annotated dataset titled BASIL, com- posed of 300 news articles annotated with 1,727 biased spans and 6,257 non-biased sentences, as well as fine-tuning BERT baseline experiments reaching an F1-Score of 47,27%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' In Lim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' (2020) a new dataset titled biased-sents was cre- ated, composed of 966 sentences from 46 English- language news articles covering 4 different events.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Spinde et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' (2021) provided an annotation-expert project through a new dataset titled BABE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' It con- sists of 3,700 sentences balanced among topics and outlets, and a new fine-tuned BERT baseline reaching an F1-Score of 80,04%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Finally, Lei et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' (2022) showed that embedded discourse struc- ture for sentence-level media bias effectively in- creases the recall of bias sentence identification by 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='27% - 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='62%, and precision by 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='82% - 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content='48%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Factuality consists of a task for predicting whether a news report is factual or not.' 
This task is largely under-explored in the literature. Baly et al. (2018) provided the first study on predicting the article-level factuality of reporting and the bias of news media, characterizing entire news outlets. In this paper, we propose the sentence-level factuality task, which consists of predicting whether each sentence of a news article is factual or not, and we further provide a study of sentence-level factuality and bias in news articles.

3 FactNews Dataset

We collected and annotated a new dataset, titled FactNews. It is a sentence-level annotated dataset containing 6,191 annotated sentences: 4,302 factual spans, 1,389 quotes, and 558 biased spans. A dataset overview is shown in Table 2.

Data Collection: FactNews was collected from 100 news stories in triples, i.e., the same story reported by 3 different Brazilian news outlets (Folha de São Paulo, https://www.folha.uol.com.br/; O Globo, https://oglobo.globo.com/; and Estadão, https://www.estadao.com.br/), resulting in 300 documents. Using a statistical approach and a search algorithm, we collected news articles from 6 different domains (politics, world, daily life, sports, science, and culture) and from the periods 2006-2007 and 2021-2022.
In accordance with the relevant literature in the area, we selected three news articles from different news outlets about the same topic or story.

Data Annotation: In line with our objective of classifying factuality and bias at the sentence level, we segmented each news article into sentences and annotated them according to three different classes: (a) factual spans, (b) biased spans, and (c) quotes. To classify biased spans, we proposed an annotation schema based on the 16 types of media bias described by AllSides (Mastrine, 2022), e.g., sensationalism, slant, opinion statements, spin, etc. Two annotators from different regions (southeast and northeast) performed the task, a linguist and a computer scientist, both holding at least a Ph.D. degree or Ph.D. candidate status.

Data Evaluation: We computed inter-annotator agreement using an established metric, Cohen's kappa, and obtained a kappa of 94.42%. The annotation was performed by two annotators, and disagreement cases were adjudicated by two different judges. In addition, two rounds of review were carried out, in which annotators could discuss doubts and re-evaluate the assigned labels. The agreement/disagreement data is available at (BLIND).
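For reference, the short sketch below shows how sentence-level inter-annotator agreement of this kind is typically computed with Cohen's kappa; the toy labels and the scikit-learn call are illustrative assumptions, not the authors' evaluation code.

```python
# Minimal sketch: Cohen's kappa between two annotators (toy labels, illustrative only).
from sklearn.metrics import cohen_kappa_score

# One label per sentence from each annotator: "factual", "quote", or "biased".
annotator_a = ["factual", "factual", "quote", "biased", "factual", "quote"]
annotator_b = ["factual", "factual", "quote", "factual", "factual", "quote"]

kappa = cohen_kappa_score(annotator_a, annotator_b)
print(f"Cohen's kappa: {kappa:.4f}")  # the paper reports 94.42% on the full annotation
```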
Data Analysis: Table 2 summarizes the FactNews dataset statistics. Most of the sentences (68.51%) are factual spans, while quotes and biased spans account for 22.52% and 8.81% of all labels, respectively. On average, each news article contains 14.14 factual sentences, 3.27 biased sentences, and 7.06 quote sentences, with 20.36 words per factual sentence, 22.14 words per biased sentence, and 17.38 words per quote.
In general, biased spans contain more words than factual spans across all grammatical categories. Furthermore, emotion is more prominent in biased spans than in factual spans. Lastly, the titles of news articles contain 8.36% bias, 5.33% quotes, and 86% factual content, whereas the bodies of news articles contain 13.35% bias, 20.38% quotes, and 66.27% factual content. Figure 1 shows the distribution of factual and biased sentences across domains for each media outlet. Notably, the distribution of factuality is roughly equivalent across domains, whereas the distribution of bias varies with the domain and the media outlet.

Figure 1: Distribution of factual and biased spans across domains from different media news sources.

4 Baseline Experiments

We introduce a strong baseline model for sentence-level factuality prediction on FactNews. We frame factuality prediction as a binary classification task (i.e., whether a sentence is factual or not), as shown in Figure 2. We further train a model for the sentence-level media bias task (i.e., whether a sentence is biased or not).
Advancing on the literature, rather than focusing only on the political domain, we present results for several domains besides politics. Both sets of results are shown in Table 3.

Figure 2: Sentence-level factuality prediction (class variable: factual vs. non-factual, where factual spans form the positive class and biased spans plus quotes form the non-factual class).

Note that we consider factual content to be information that deals with facts, i.e., information presented with impartiality and focused on objective facts, in contrast to non-factual content, which breaks the commitment to impartiality.
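To make this setup concrete, the snippet below shows how the three annotation classes could be collapsed into the binary factuality target; the column and label names are hypothetical and only illustrate the class mapping, not the released data format.

```python
# Illustrative mapping of the three FactNews annotation classes to the binary target
# (column and label names are hypothetical; the released data format may differ).
import pandas as pd

sentences = pd.DataFrame({
    "text": ["Sentence A ...", "Sentence B ...", "Sentence C ..."],
    "label": ["factual", "quote", "biased"],
})

# Factual spans are the positive class; biased spans and quotes are non-factual.
sentences["is_factual"] = (sentences["label"] == "factual").astype(int)
print(sentences)
```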
Description               | Folha de São Paulo (factual / quotes / biased) | Estadão (factual / quotes / biased) | O Globo (factual / quotes / biased) | All
#Articles                 | 100                     | 100                     | 100                     | 300
#Sentences                | 1,494 / 450 / 231       | 1,428 / 483 / 182       | 1,320 / 458 / 145       | 6,191
#Words                    | 30,374 / 7,946 / 5,177  | 30,589 / 8,504 / 4,002  | 25,505 / 7,740 / 3,195  | 123,032
Avg Sentences/Article     | 14.94 / 7.03 / 3.78     | 14.28 / 7.00 / 3.19     | 13.20 / 7.15 / 2.84     | 8.15
Avg Words/Sentence        | 20.33 / 17.65 / 22.41   | 21.45 / 17.60 / 21.98   | 19.32 / 16.89 / 22.03   | 19.96
Body (sentences)          | 1,337 / 440 / 207       | 1,218 / 473 / 162       | 1,089 / 441 / 131       | 5,498
Title (sentences)         | 157 / 10 / 24           | 210 / 10 / 20           | 231 / 17 / 14           | 693
Domain: Political         | 912 / 340 / 130         | 870 / 352 / 106         | 748 / 351 / 64          | 3,873
Domain: World             | 224 / 48 / 31           | 224 / 49 / 27           | 216 / 32 / 29           | 880
Domain: Sports            | 100 / 23 / 34           | 124 / 25 / 29           | 98 / 18 / 39            | 490
Domain: Daily Life        | 132 / 11 / 2            | 98 / 7 / 4              | 148 / 7 / 4             | 413
Domain: Culture           | 98 / 26 / 32            | 72 / 42 / 15            | 77 / 45 / 5             | 412
Domain: Science           | 28 / 2 / 2              | 40 / 8 / 1              | 33 / 5 / 4              | 123
POS (avg): Noun           | 4.85 / 4.09 / 5.72      | 5.21 / 4.12 / 5.60      | 4.59 / 3.82 / 5.19      | 4.79
POS (avg): Verb           | 2.20 / 2.55 / 2.60      | 2.28 / 2.51 / 2.53      | 2.00 / 2.44 / 2.57      | 4.18
POS (avg): Adjective      | 1.03 / 1.03 / 1.32      | 1.11 / 1.08 / 1.32      | 0.94 / 0.97 / 1.48      | 1.14
POS (avg): Adverb         | 0.67 / 0.82 / 0.93      | 0.67 / 0.94 / 0.90      | 0.59 / 0.90 / 0.94      | 0.81
POS (avg): Pronoun        | 0.52 / 1.02 / 0.73      | 0.51 / 0.97 / 0.56      | 0.47 / 0.90 / 0.59      | 0.69
POS (avg): Conjunction    | 0.51 / 0.55 / 0.61      | 0.54 / 0.57 / 0.73      | 0.51 / 0.88 / 0.70      | 0.62
Emotion (avg): Happiness  | 0.12 / 0.22 / 0.20      | 0.16 / 0.28 / 0.26      | 0.13 / 0.28 / 0.22      | 0.20
Emotion (avg): Disgust    | 0.03 / 0.06 / 0.05      | 0.04 / 0.06 / 0.03      | 0.04 / 0.04 / 0.04      | 0.04
Emotion (avg): Fear       | 4.18 / 3.80 / 4.63      | 4.41 / 3.77 / 4.56      | 4.05 / 3.60 / 4.50      | 4.16
Emotion (avg): Anger      | 0.05 / 0.06 / 0.13      | 0.07 / 0.07 / 0.12      | 0.06 / 0.08 / 0.20      | 0.09
Emotion (avg): Surprise   | 0.01 / 0.03 / 0.03      | 0.01 / 0.03 / 0.05      | 0.01 / 0.02 / 0.01      | 0.02
Emotion (avg): Sadness    | 5.86 / 5.71 / 6.52      | 6.17 / 5.55 / 6.48      | 5.56 / 5.40 / 6.19      | 5.93
Polarity (avg): Positive  | 2.41 / 3.25 / 2.93      | 2.55 / 3.22 / 2.95      | 2.26 / 3.26 / 2.96      | 2.86
Polarity (avg): Negative  | 0.05 / 0.06 / 0.05      | 0.07 / 0.10 / 0.09      | 0.06 / 0.07 / 0.06      | 0.06
Polarity (avg): Neutral   | 9.55 / 9.77 / 10.93     | 9.92 / 9.52 / 11.03     | 8.91 / 9.28 / 10.56     | 9.94
Table 2: Dataset Statistics.

Model Architecture and Settings: For data preparation, we only removed special characters. As the learning method, we used an SVM with a linear kernel and balanced the classes using undersampling. We split the data into training (90%) and validation (10%) sets and applied 10-fold cross-validation.
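As a reference point, the sketch below outlines such a setup with scikit-learn, using TF-IDF features (one of the feature sets described next) as an example; the toy data and the undersampling routine are illustrative assumptions rather than the authors' exact implementation.

```python
# Minimal sketch of the linear-SVM baseline setup described above
# (toy data, undersampling routine, and TF-IDF features are illustrative assumptions).
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC

# Toy stand-in for the annotated sentences: 1 = factual, 0 = non-factual.
texts = ([f"the ministry reported figure {i} on monday" for i in range(60)]
         + [f"this is clearly an outrageous decision {i}" for i in range(30)])
labels = np.array([1] * 60 + [0] * 30)

# Balance the classes by undersampling the majority class.
rng = np.random.default_rng(42)
idx_pos = np.flatnonzero(labels == 1)
idx_neg = np.flatnonzero(labels == 0)
n = min(len(idx_pos), len(idx_neg))
keep = np.concatenate([rng.choice(idx_pos, n, replace=False),
                       rng.choice(idx_neg, n, replace=False)])
texts_bal = [texts[i] for i in keep]
labels_bal = labels[keep]

# 90/10 train/validation split, then 10-fold cross-validation on the training part.
X_train, X_val, y_train, y_val = train_test_split(
    texts_bal, labels_bal, test_size=0.1, stratify=labels_bal, random_state=42)

model = make_pipeline(TfidfVectorizer(), LinearSVC())  # TF-IDF features + linear SVM
scores = cross_val_score(model, X_train, y_train, cv=10, scoring="f1_macro")
print(f"10-fold macro-F1 on the toy data: {scores.mean():.2f}")
```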
We implemented experiments with four different model architectures: (i) BERT fine-tuning (best model), with batch size 64, a maximum of 500 features, a learning rate of 2e-05, and 4 epochs; (ii) subjective lexicons, with features based on sentiment and emotion lexicons (Pasqualotti, 2008), which capture semantic polarity and emotion types; (iii) part-of-speech, with features based on part-of-speech tags (noun, verb, adjective, adverb, pronoun, and conjunction) obtained with the spaCy POS tagger; and (iv) TF-IDF, a baseline vector space model using term frequency-inverse document frequency.

4.1 Results

Table 3 summarizes the results. For sentence-level factuality prediction, the baseline model obtained an F1-score of 87%. For sentence-level media bias across domains beyond politics, the best model obtained an F1-score of 67%. Notably, the part-of-speech model also achieved competitive results on both tasks, in contrast to the subjective-lexicon models.
Sentence-Level Factuality  | Precision | Recall | F1-Score
BERT fine-tuning           | 0.87      | 0.87   | 0.87
Part-of-speech             | 0.77      | 0.77   | 0.76
TF-IDF                     | 0.81      | 0.69   | 0.66
Polarity-lexicon           | 0.63      | 0.62   | 0.62
Emotion-lexicon            | 0.61      | 0.61   | 0.61

Sentence-Level Media Bias  | Precision | Recall | F1-Score
BERT fine-tuning           | 0.70      | 0.68   | 0.67
Part-of-speech             | 0.67      | 0.66   | 0.66
Polarity-lexicon           | 0.50      | 0.50   | 0.50
Emotion-lexicon            | 0.53      | 0.52   | 0.50
TF-IDF                     | 0.78      | 0.58   | 0.48

Table 3: Baseline results for sentence-level factuality prediction, as well as results for sentence-level media bias prediction across domains.
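Since the fine-tuned BERT model is the strongest baseline, a minimal fine-tuning sketch with the reported hyperparameters (batch size 64, learning rate 2e-05, 4 epochs) follows; the Portuguese checkpoint, the reading of "maximum of 500 features" as a 500-token limit, and the toy data are assumptions, as the paper does not specify them.

```python
# Hedged sketch of BERT fine-tuning with the hyperparameters reported above.
# The checkpoint, the 500-token limit, and the toy data are assumptions.
from datasets import Dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

checkpoint = "neuralmind/bert-base-portuguese-cased"  # assumed checkpoint; the paper only says "BERT"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

# Toy stand-in for the FactNews sentences: label 1 = factual, 0 = non-factual.
train_dataset = Dataset.from_dict({
    "text": ["O ministério divulgou os dados na segunda-feira.",
             "A decisão é um absurdo completo."],
    "label": [1, 0],
})

def tokenize(batch):
    # "maximum of 500 features" is interpreted here as a 500-token sequence limit (assumption)
    return tokenizer(batch["text"], truncation=True, padding="max_length", max_length=500)

train_dataset = train_dataset.map(tokenize, batched=True)

args = TrainingArguments(
    output_dir="factnews-bert",       # hypothetical output directory
    per_device_train_batch_size=64,
    learning_rate=2e-5,
    num_train_epochs=4,
)
trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
# trainer.train()  # uncomment to launch fine-tuning on the real data
```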
5 Conclusions

We presented a study on factuality and bias in news across domains. We first created a large, manually annotated dataset for sentence-level factuality and bias prediction, and we provided a detailed analysis of the data, demonstrating the reliability of the annotation and of the models. We also built a strong fine-tuned BERT baseline for fine-grained factuality prediction, as well as a sentence-level media bias model across domains. Based on our findings, biased spans contain more words and more emotion than factual spans, and the distribution of bias in news articles varies with the domain and the media outlet, in contrast to factual spans.

Acknowledgements

The authors are grateful to Isabelle Carvalho for providing valuable judgements on annotation disagreement cases, as well as to SINCH, CNPq, FAPEMIG, and FAPESP for partially funding this project.

References

Ramy Baly, Giovanni Da San Martino, James Glass, and Preslav Nakov. 2020. We can detect your bias: Predicting the political ideology of news articles. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, pages 4982–4991, Held Online.

Ramy Baly, Georgi Karadzhov, Dimitar Alexandrov, James Glass, and Preslav Nakov. 2018. Predicting factuality of reporting and bias of news media sources. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3528–3539, Brussels, Belgium.
Ramy Baly, Georgi Karadzhov, Abdelrhman Saleh, James Glass, and Preslav Nakov. 2019. Multi-task ordinal regression for jointly predicting the trustworthiness and the leading political ideology of news media. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2109–2116, Minneapolis, Minnesota.

Shikha Bordia and Samuel R. Bowman. 2019. Identifying and reducing gender bias in word-level language models. In Proceedings of the 17th Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop, pages 7–15, Minneapolis, Minnesota.

Robert Entman. 2007. Framing bias: Media in the distribution of power. Journal of Communication, 57(1):163–173.

Lisa Fan, Marshall White, Eva Sharma, Ruisi Su, Prafulla Kumar Choubey, Ruihong Huang, and Lu Wang. 2019. In plain sight: Media bias through the lens of factual reporting. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing, pages 6343–6349, Hong Kong, China.
Felix Hamborg. 2020. Media bias, the social sciences, and NLP: Automating frame analyses to identify bias by word choice and labeling. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop, pages 79–87, Held Online.

Mohit Iyyer, Peter Enns, Jordan Boyd-Graber, and Philip Resnik. 2014. Political ideology detection using recursive neural networks. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics, pages 1113–1122, Baltimore, Maryland.

Yuanyuan Lei, Ruihong Huang, Lu Wang, and Nick Beauchamp. 2022. Sentence-level media bias analysis informed by discourse structures. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 10040–10050, Abu Dhabi, United Arab Emirates.

Sora Lim, Adam Jatowt, Michael Färber, and Masatoshi Yoshikawa. 2020. Annotating and analyzing biased sentences in news articles using crowdsourcing. In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 1478–1484, Marseille, France.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' In Findings of the Association for Computational Linguistics: 2022 Annual Conference of the North American Chapter of the Association for Computa- tional Linguistics, pages 1354–1374, Seattle, United States.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Julie Mastrine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' How to Spot 16 Types of Media Bias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' AllSides: Don’t be fooled by media bias & misinformation, California, United States.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' R Pasqualotti.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' 2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Reconhecimento de expressões de emoções na interação mediada por computador.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Master’s thesis, Dissertação de Mestrado em Ciência da Computação.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Pontifícia Universidade Católica do Rio Grande do Sul - PUCRS, Porto Alegre, Brasil.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Marta Recasens, Cristian Danescu-Niculescu-Mizil, and Dan Jurafsky.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Linguistic models for an- alyzing and detecting biased language.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' In Proceed- ings of the 51st Annual Meeting of the Association for Computational Linguistics, pages 1650–1659, Sofia, Bulgaria.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Shamik Roy and Dan Goldwasser.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Weakly su- pervised learning of nuanced frames for analyzing polarization in news media.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, pages 7698–7716, Held On- line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Eitan Sapiro-Gheiler.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Examining political trust- worthiness through text-based measures of ideology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' In Proceedings of the AAAI Conference on Artifi- cial Intelligence, volume 33, pages 10029–10030, Hawaii, United States.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Timo Spinde, Manuel Plank, Jan-David Krieger, Terry Ruas, Bela Gipp, and Akiko Aizawa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' Neu- ral media bias detection using distant supervision with BABE - bias annotations by experts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' In Find- ings of the Association for Computational Linguis- tics: EMNLP 2021, pages 1166–1177, Punta Cana, Dominican Republic.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} +page_content=' 5' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/TdFKT4oBgHgl3EQfki6p/content/2301.11850v1.pdf'} diff --git a/TtE0T4oBgHgl3EQfVAA3/vector_store/index.faiss b/TtE0T4oBgHgl3EQfVAA3/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..dd3c312716dc713ca8dcdf643e7672b365749759 --- /dev/null +++ b/TtE0T4oBgHgl3EQfVAA3/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7376f5c766a115e72624aa7dc75b54f3a5fa1be8ed088495579410c872c7aa9 +size 11862061 diff --git a/UNE5T4oBgHgl3EQfbQ8x/vector_store/index.faiss b/UNE5T4oBgHgl3EQfbQ8x/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..3842648aba42e1bd9e0b70051eeb18c5090f88ec --- /dev/null +++ b/UNE5T4oBgHgl3EQfbQ8x/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51ded15ebfbbba66eb2ae3e03508d08671924f69e991b923ec94ddd6d9d07847 +size 2097197 diff --git a/VdAyT4oBgHgl3EQf8vrJ/content/tmp_files/2301.00863v1.pdf.txt b/VdAyT4oBgHgl3EQf8vrJ/content/tmp_files/2301.00863v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..1f50d12c7e99586a599e020511bcf23ea6ef7fe3 --- /dev/null +++ b/VdAyT4oBgHgl3EQf8vrJ/content/tmp_files/2301.00863v1.pdf.txt @@ -0,0 +1,1529 @@ +arXiv:2301.00863v1 [math.AP] 2 Jan 2023 +Sensitivity Analysis of the Current, the Electrostatic +Capacity, and the Far-Field of the Potential with Respect +to Small Perturbations in the Surface of a Conductor +Jihene Lagha ∗ +Habib Zribi† +Abstract +We derive asymptotic expansions of the current, the electrostatic capacity, and the +far-field of the electrostatic potential resulting from small perturbations in the shape of +an isolated conductor with C2-surface. Our derivation is rigorous by using systematic +way, based on layer potential techniques and the field expansion (FE) method. We +then use these results to study the sensibility analysis of the first eigenvector of the +L2-adjoint of the Neumann-Poincar´e (NP) operator with respect to small perturbations +in the surface of its domain. +Mathematics Subject Classification (MSC2000): 35B30, 35B40 +Keywords: Isolated conductor, electrostatic capacity, small surface perturbations, boundary integral method, +field expansion method, Laplace equation, Neumann-Poincar´e operator type +1 +Introduction and statement of the main results +Suppose that an isolated conductor occupies a bounded domain Ω in R3, with a connected +C2-surface ∂Ω. A conductor is a volume which contains free charges. In the presence of +an external electric field, each free charge in the conductor redistributes and very quickly +reaches electrostatic equilibrium (see [17, 18]). The free charges are redistributed in such a +way the electric field inside the conductor vanishes and the electrical filed is always perpen- +dicular everywhere on the surface of the conductor. The electrostatic potential is constant +throughout the volume of the conductor and has the same value on its surface, let’s say 1 +volt. More precisely, let u be the electrostatic potential in the presence of a conductor Ω at +equilibrium in R3. It is the unique solution of the following problem + + + + + + + + + +∆u = 0 +in R3\Ω, +u = 1 +on ∂Ω, +lim +|x|→∞ u(x) = 0. 
+(1.1) +∗Universit´e de Tunis El Manar, Facult´e des Sciences de Tunis, LR11ES13 Laboratoire d’Analyse stochas- +tique et Applications, 2092 Tunis, Tunisie (lagha.jihene@yahoo.fr) +†Department of Mathematics, College of Science, University of Hafr Al Batin, P.O. 1803, Hafr Al Batin +31991, Saudi Arabia (zribi.habib@yahoo.fr) +1 + +The electrostatic capacity with respect to infinity of the conductor Ω, denoted cap(Ω), is +defined as the ratio of the charge in equilibrium on it to the value of the potential u at its +surface ∂Ω. That is, the capacity is the charge producing this potential which is given by +Gauss’ integral (see for instance [19, 21, 11]) +cap(Ω) = − 1 +4π +� +∂Ω +∂u +∂n(x)dσ(x) +� += − 1 +4π +� +∂Ω +∂u +∂n(x)u(x)dσ(x) +� +, +(1.2) +where n and dσ are the unit outward normal and the length element to the boundary +∂Ω, respectively. The electrostatic capacity cap(Ω) may also be defined as the quantity of +electrical charge which must be given to the conductor Ω to raise its potential to the value +unity, it depends on its own form and size; being greater as the seize increased. In this work, +we investigate the sensitivity analysis of the electrostatic capacity with small changes in the +form of its domain. +It is well-known that 0 ≤ cap(Ω) < +∞, which can be easily proved by applying the +Green’s identity to the integral in (1.2) over the unbounded domain R3\Ω. The electrostatic +capacity can be determined from the far-field of the electrostatic potential u defined in (1.1). +In fact, in [20, 10], u has the asymptotic expansion +u(x) = cap(Ω) +|x| ++ O( 1 +|x|2 ) +as |x| → ∞. +(1.3) +Therefore, in order to find the electrostatic capacity, we have to pick out the coefficient of +1/|x| in the asymptotic expansion (1.3). The capacities are known analytically for a few +simple shapes like sphere, ellipsoid, lens, spindle, and anchor-ring. See [22, 21]. +Let Ωǫ be an ǫ-perturbation of Ω, i.e., there is a function h ∈ C1(∂Ω) such that ∂Ωǫ is +given by +∂Ωǫ = +� +˜x = x + ǫh(x)n(x) := Ψǫ(x)|x ∈ ∂Ω +� +. +We denote by uǫ the perturbed electrostatic potential in the presence of the conductor Ωǫ +in electrostatic equilibrium. It is the unique solution of the following problem + + + + + + + + + +∆uǫ = 0 +in R3\Ωǫ, +uǫ = 1 +on ∂Ωǫ, +lim +|x|→∞ uǫ(x) = 0. +(1.4) +The perturbed electrostatic capacity with respect to infinity of the conductor Ωǫ is given by +cap(Ωǫ) = − 1 +4π +� +∂Ωǫ +∂uǫ +∂˜n (y)d˜σ(y), +(1.5) +where ˜n and d˜σ are the unit outward normal and the length element to the boundary ∂Ωǫ, +respectively. Similarly to (1.3), uǫ satisfies +uǫ(x) = cap(Ωǫ) +|x| ++ O( 1 +|x|2 ) +as |x| → ∞. +(1.6) +Our goal is to find asymptotic expansions for the current, the electrostatic capacity, and +the far-field of the electrostatic potential resulting from small perturbations on the surface +2 + +of a conductor at equilibrium in free space. The main idea is to adopt the FE method to +derive formal asymptotic expansions. Then, based on layer potential techniques, we prove +rigorously those asymptotic expansions. In connection with this, we refer to recent works in +the same context [13, 23, 24, 16, 5, 6, 14, 12, 15]. +The first achievement of this paper is the following theorem, a rigorous derivation of the +asymptotic expansion of the perturbed current ∂uǫ/∂˜n on ∂Ωǫ as ǫ → 0. +Theorem 1.1 Let ˜x = x + ǫh(x)n(x) ∈ ∂Ωǫ, for x ∈ ∂Ω. Let uǫ and u be the solutions of +(1.4) and (1.1), respectively. 
The following asymptotic expansion holds: +∂uǫ +∂˜n (˜x) = ∂u +∂n(x) + ǫ +� +2τ(x)h(x)∂u +∂n(x) + ∂v +∂n(x) +� ++ O(ǫ2), +(1.7) +where the remainder O(ǫ2) depends only on the C2-norm of X and the C1-norm of h, τ is +the mean curvature of Ω, and v is the unique solution to + + + + + + + + + + + +∆v = 0 +in R3\Ω, +v = −h∂u +∂n +on ∂Ω, +lim +|x|→∞ v(x) = 0. +(1.8) +The second result of this paper is the following theorem, we rigorously derive the asymp- +totic expansion of cap(Ωǫ) as ǫ → 0. +Theorem 1.2 The following asymptotic expansion holds: +cap(Ωǫ) = cap(Ω) + ǫ +4π +� +∂Ω +h +�∂u +∂n +�2 +dσ + O(ǫ2), +(1.9) +where the remainder O(ǫ2) depends only on the C2-norm of X and the C1-norm of h. +It is worth noticing that if h has a constant sign on ∂Ω that there exists ǫ0 > 0 such that +for ǫ < ǫ0, cap(Ωǫ) − cap(Ω) has the same sign as h. +The following theorem represents the third result of this paper, a rigorous derivation of +the asymptotic expansion of the far-field of the electrostatic potential uǫ as ǫ → 0. +Theorem 1.3 Let uǫ and u be the solutions of (1.4) and (1.1), respectively. We have the +following asymptotic expansion +uǫ(x) = u(x) + +ǫ +4π|x| +� +∂Ω +h +�∂u +∂n +�2 +dσ + O +� ǫ2 +|x| +� ++ O +� ǫ +|x|2 +� +(1.10) +as |x| → ∞ and ǫ → 0, where the remainders O(ǫ2/|x|) and O(ǫ/|x|2) depend only on the +C2-norm of X, the C1-norm of h, and the distance(origin, Ω). +These asymptotic expansions had not been established before this work. They can be +taken into account in the design of conductors to avoid negative effects due to small changes +in their shapes. Our asymptotic expansions are still valid in the case of small perturbations +of one of the walls of a condenser in electrostatic equilibrium, the FE method works well, +but more elaborate arguments are needed for the layer potential techniques method. +3 + +The asymptotic expansions can be used to design effective algorithms to recover certain +properties of the perturbation of the shape of an isolated conductor. That is, we would like +to find a method for determining the shape of a conductor by taking one or a combination of +current, electrostatic capacity, and electrostatic potential measurements. One of solutions +is to extend that optimization approach in [2] by using electrostatic capacity measurements. +This article is organized as follows. In the next section we formally derive the asymptotic +expansions (1.7), (1.9), and (1.10) by using the FE method. In the section 2, based on layer +potential techniques method we rigorously prove those in fact the formal expansions hold. In +the last section, we derive an asymptotic expansion of the first eigenvector of the L2−adjoint +of the NP operator resulting from small perturbations of the surface of its domain. +2 +Formal derivations: the FE method +We refer to [13] for further details on the following concepts and definitions. Suppose ∂Ω +has an orthogonal parametrization X(ξ, θ), that is, there is an open subset ϑ of R2 such +that ∂Ω := +� +x = X(ξ, θ), (ξ, θ) ∈ ϑ +� +, where X is a C2-function satisfying (Xξ := +dX +dξ ) · +(Xθ := dX +dθ ) = 0 and Xξθ = Xθξ. We point out that a revolution surface has an orthogonal +parametrization. The vectors Tξ = Xξ/|Xξ| and Tθ = Xθ/|Xθ| form an orthonormal basis +for the tangent plane to ∂Ω at x = X(ξ, θ). The tangential derivative on ∂Ω is defined by +∂ +∂T = +∂ +∂Tξ Tξ + +∂ +∂Tθ Tθ. We denote by G the matrix of the first fundamental form with respect +to the basis {Xξ, Xθ}. 
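Before carrying out the expansion, it is worth sanity-checking the first-order capacity formula (1.9) on the one geometry where everything is explicit, a ball of radius R centered at the origin: there u(x) = R/|x|, ∂u/∂n = −1/R on the boundary, cap(Ω) = R, and for the uniform perturbation h ≡ 1 the perturbed domain is the ball of radius R + ǫ, whose exact capacity R + ǫ coincides with what (1.9) predicts. The short Python sketch below is an editorial illustration, not part of the original derivation; the radius, the value of ǫ, and the second perturbation profile are arbitrary choices, and the surface integral in (1.9) is evaluated with a simple midpoint rule.

import numpy as np

R, eps = 2.0, 0.05                      # illustrative radius and perturbation size

# Midpoint rule on the sphere |x| = R: dsigma = R^2 sin(theta) dtheta dphi.
nt, nphi = 400, 400
theta = (np.arange(nt) + 0.5) * np.pi / nt
phi = (np.arange(nphi) + 0.5) * 2 * np.pi / nphi
TH = np.meshgrid(theta, phi, indexing="ij")[0]
dsigma = R**2 * np.sin(TH) * (np.pi / nt) * (2 * np.pi / nphi)

du_dn = -1.0 / R                        # normal derivative of u(x) = R/|x| on |x| = R

def cap_first_order(h):
    """Right-hand side of (1.9): cap(Omega) + (eps/4pi) * int_{dOmega} h (du/dn)^2 dsigma."""
    return R + eps / (4 * np.pi) * np.sum(h * du_dn**2 * dsigma)

# Uniform inflation h = 1: the perturbed domain is the ball of radius R + eps,
# whose capacity is exactly R + eps.
print(cap_first_order(np.ones_like(TH)), "vs exact", R + eps)

# A non-constant profile: the first-order correction is eps times the surface
# average of h, again eps here since cos(theta) averages to zero over the sphere.
print(cap_first_order(1.0 + 0.3 * np.cos(TH)))

For h of constant sign the computed first-order correction has the sign of h, in line with the remark made after Theorem 1.2.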
+For w ∈ C1(ϑ), the gradient operator in local coordinates satisfies +∇ξ,θw = +� +G11 +∂w +∂Tξ +Tξ + +� +G22 +∂w +∂Tθ +Tθ = G +1 +2 ∂w +∂T . +(2.1) +For w ∈ C2(ϑ), the restriction of ∆ in R3\∂Ω to a neighbourhood of ∂Ω can be expressed +as: +∆w = ∂2w +∂n2 − 2τ ∂w +∂n + ∆Gw, +(2.2) +where ∆G is the Laplace–Beltrami operator associated to G which is given by +∆Gw = +1 +√ +detG ∇ξ,θ · +�√ +detGG−1∇ξ,θw +� +. +(2.3) +We use h(ξ, θ) for simplifying the term h(X(ξ, θ)) and hξ(ξ, θ), hθ(ξ, θ) for the tangential +derivatives of h(X(ξ, θ)). Then, ˜x = X(ξ, θ) + ǫh(ξ, θ)n(x) is a parametrization of ∂Ωǫ. +The following asymptotic expansions for the normal derivative ˜n(˜x) and the length ele- +ment d˜σ(˜x) hold. For proofs, see [13]. +Lemma 2.1 Let ˜x = x + ǫh(x)n(x) ∈ ∂Ωǫ, for x ∈ ∂Ω. Then, the outward unit normal +˜n(˜x) to ∂Ωǫ at ˜x can be expanded uniformly as +˜n(˜x) := +˜Xξ ∧ ˜Xθ +| ˜Xξ ∧ ˜Xθ| += +∞ +� +k=0 +ǫknk(x), +(2.4) +4 + +where the vector-valued functions nk are uniformly bounded regardless of k. In particular, +for x ∈ ∂Ω, we have +n0(x) = n(x), +n1(x) = − ∂h +∂T (x)T (x). +Likewise, the length element d˜σ(˜x) has the following uniformly expansion +d˜σ(˜x) := | ˜Xξ ∧ ˜Xθ|dξdθ = | ˜Xξ ∧ ˜Xθ| +� +det(G) +dσ(x) = +∞ +� +k=0 +ǫkσ(k)(x)dσ(x), +(2.5) +where σ(k) are uniformly bounded regardless of k with +σ(0)(x) = 1, +σ(1)(x) = −2h(x)τ(x). +Let uǫ be the solution to (1.4). In order to derive a formal asymptotic expansion for uǫ, +we apply the FE method, see [24, 8, 13, 16, 23]. Firstly, we expand uǫ in powers of ǫ, +uǫ(x) = u0(x) + ǫu1(x) + ǫ2u2(x) + · · · , +x ∈ R3\Ωǫ, +(2.6) +where ul are defined on R3\∂Ω. Since ul satisfy + + + +∆ul = 0 +in R3\Ω, +lim +|x|→∞ ul(x) = 0. +(2.7) +In order to justify the first equation in (2.7), we substitute (2.6) in the Laplace equation +∆uǫ = 0 in R3\Ωǫ to get ∆ul = 0 in R3\Ωǫ for ǫ > 0. Because ǫ is arbitrary, we confirm +that ∆ul = 0 in R3\Ω. +Let ˜x = x + ǫh(x)n(x) ∈ ∂Ωǫ, for x ∈ ∂Ω. The following Taylor expansion holds: +∂uǫ +∂˜n (˜x) = +� +∇u0 +� +x + ǫh(x)n(x) +� ++ ǫ∇u1 +� +x + ǫh(x)n(x) +�� +· ˜n(˜x) + O(ǫ2) += +� +∇u0(x) + ǫh(x)∇2u0(x)n(x) + ǫ∇u1(x) +� +· +� +n(x) − ǫ ∂h +∂T (x)T (x) +� ++ O(ǫ2) +=∂u0 +∂n (x) + ǫh(x)∂2u0 +∂n2 (x) + ǫ∂u1 +∂n (x) − ǫ ∂h +∂T (x) · ∂u0 +∂T (x) + O(ǫ2) +=∂u0 +∂n (x) + 2ǫτ(x)h(x)∂u0 +∂n (x) + ǫ∂u1 +∂n (x) +− ǫ ∂h +∂T (x) · ∂u0 +∂T (x) − ǫh(x)∆Gu0(x) + O(ǫ2). +(2.8) +To justify the last equality, we use the representation of the Laplace operator on ∂Ω given +in (2.2) +0 = ∆u0 = ∂2u0 +∂n2 − 2τ ∂u0 +∂n + ∆Gu0 +on ∂Ω. +5 + +For ˜x = x + ǫh(x)n(x) ∈ ∂Ωǫ. We have the following Taylor expansion +uǫ(˜x) = u0 +� +x + ǫh(x)n(x) +� ++ ǫu1 +� +x + ǫh(x)n(x) +� ++ O(ǫ2) += u0(x) + ǫh(x)∂u0 +∂n (x) + ǫu1(x) + O(ǫ2). +(2.9) +Using the boundary condition uǫ(˜x) = 1 for ˜x = Ψ(x) ∈ ∂Ωǫ, we obtain from (2.9) that +u0(x) = 1, +u1(x) = −h(x)∂u0 +∂n (x), +for x ∈ ∂Ω. +(2.10) +By the uniqueness of the PDE problems (1.1) and (1.8), we get u0 ≡ u and u1 ≡ v in R3\Ω. +The fourth and fifth terms in (2.8) vanish, this is because u = 1 on ∂Ω which implies that +∂u/∂T = 0 on ∂Ω. Then Theorem 1.1 immediately follows from (2.8) formally. +A change of variables y = Ψǫ(x) for x ∈ ∂Ω in (1.5) gives +cap(Ωǫ) = − 1 +4π +� +∂Ω +∂uǫ +∂˜n (˜x)uǫ(˜x)d˜σ(˜x). +(2.11) +It then follows from (2.5), (2.8), and (2.9) that +cap(Ωǫ) = − 1 +4π +� +∂Ω +∂u +∂nudσ − ǫ +4π +� +∂Ω +h +�∂u +∂n +�2 +dσ +− ǫ +4π +� +∂Ω +∂v +∂nudσ − ǫ +4π +� +∂Ω +∂u +∂nvdσ + O(ǫ2). +(2.12) +By Green’s identity and (2.7), we have +� +∂Ω +∂v +∂nudσ = +� +∂Ω +∂u +∂nvdσ. 
+We get from (2.10) that +� +∂Ω +∂v +∂nudσ + +� +∂Ω +∂u +∂nvdσ = 2 +� +∂Ω +∂u +∂nvdσ = −2 +� +∂Ω +h +�∂u +∂n +�2 +dσ. +(2.13) +Thus, by (1.2), (2.12), and (2.13), we formally obtain the desired Theorem 1.2, i.e., +cap(Ωǫ) = cap(Ω) + ǫ +4π +� +∂Ω +h +�∂u +∂n +�2 +dσ + O(ǫ2). +(2.14) +As a direct consequence of (1.3), (1.6), and (2.14), the leading order term in the asymptotic +expansion of the far-field uǫ − u in Theorem 1.3 holds formally +uǫ(x) − u(x) = +ǫ +4π|x| +� +∂Ω +h +�∂u +∂n +�2 +dσ + O( ǫ2 +|x|) + O( 1 +|x|2 ) +(2.15) +as ǫ → 0 and ǫ >> 1/|x|. By the layer potential techniques method we will prove in the +subsection 3.3.3 the asymptotic expansion (2.15) with a remainder O(ǫ2/|x|) + O(ǫ/|x|2) as +ǫ → 0 and |x| → ∞ which is more better than O(ǫ2/|x|)+O(1/|x|2) as ǫ → 0 and ǫ >> 1/|x|. +6 + +3 +Layer potential techniques method +3.1 +Definitions and Preliminary results +Let Ω be a bounded C2-domain. Let Γ(x) be the fundamental solution of the Laplacian ∆ +in R3: Γ(x) = − +1 +4π|x|. The single and double layer potentials of the density function φ on +∂Ω are defined by +SΩ[φ](x) = +� +∂Ω +Γ(x − y)φ(y)dσ(y), +x ∈ R3, +(3.1) +DΩ[φ](x) = +� +∂Ω +∂ +∂n(y)Γ(x − y)φ(y)dσ(y), +x ∈ R3 \ ∂Ω. +(3.2) +We note that for x ∈ R3\∂Ω and y ∈ ∂Ω, Γ(x − y) and +∂ +∂n(y)Γ(x − y) are L∞-functions in y +and harmonic in x and their behaviors when |x| → +∞ are given by +Γ(x − y) = O( 1 +|x|), +∂ +∂n(y)Γ(x − y) = O( 1 +|x|2 ). +(3.3) +Therefore, we readily see that DΩ[φ] and SΩ[φ] are well defined and harmonic in R3\∂Ω and +satisfy +SΩ[φ](x) = O( 1 +|x|), +DΩ[φ](x) = O( 1 +|x|2 ), +as |x| → +∞. +(3.4) +We denote +∂w +∂n |± = n · ∇w±|∂Ω, where w+ = w|R3\Ω and w− = w|Ω. The following +formulae give the jump relations obeyed by the double layer potential and by the normal +derivative of the single layer potential. For proofs, see [9, 1]. +SΩ[φ] +�� ++(x) = SΩ[φ] +�� +−(x) +a.e. x ∈ ∂Ω, +(3.5) +∂(SΩ[φ]) +∂T +��� ++(x) = ∂(SΩ[φ]) +∂T +��� +−(x) +a.e. x ∈ ∂Ω, +(3.6) +∂(SΩ[φ]) +∂n +��� +±(x) = +� +± 1 +2I + (KΩ)∗� +[φ](x) +a.e. x ∈ ∂Ω, +(3.7) +(DΩ[φ]) +�� +±(x) = +� +∓ 1 +2I + KΩ +� +[φ](x) +a.e. x ∈ ∂Ω, +(3.8) +for φ ∈ L2(∂Ω), where KΩ is the NP operator defined by +KΩ[φ](x) = 1 +4π +� +∂Ω +⟨y − x, n(y)⟩ +|x − y|3 +φ(y)dσ(y), +and K∗ +Ω is the L2-adjoint operator of the NP operator KΩ, that is, +K∗ +Ω[φ](x) = 1 +4π +� +∂Ω +⟨x − y, n(x)⟩ +|x − y|3 +φ(y)dσ(y). +(3.9) +7 + +The operators KΩ and K∗ +Ω are singular integral operators and bounded on L2(∂Ω). Because +Ω has a C2 boundary, ∂(DΩ[φ]) +∂n +does not have a jump across ∂Ω, that is, +∂(DΩ[φ]) +∂n +��� ++(x) = ∂(DΩ[φ]) +∂n +��� +−(x), +x ∈ ∂Ω. +(3.10) +Let W 2 +1 (∂Ω) := {f ∈ L2(∂Ω) : ∂f/∂T ∈ L2(∂Ω)}. The following lemma is of importance +to us. For proof, see for example [4]. +Lemma 3.1 Let Ω be a bounded Lipschitz domain in R3. Then +(i) SΩ : L2(∂Ω) −→ W 2 +1 (∂Ω) has a bounded inverse. +(ii) KΩ : W 2 +1 (∂Ω) −→ W 2 +1 (∂Ω) is a bounded operator. +We will need the following lemma which was obtained in [9]; see also [1]. +Lemma 3.2 If Ω is a bounded C2-domain. Then DΩ[1](x) = 0 for x ∈ R3 \Ω, DΩ[1](x) = 1 +for x ∈ Ω, and KΩ[1] = 1 +2 for x ∈ ∂Ω. +3.2 +Asymptotic of layer potentials +By using the change of variable z = Ψǫ(y) = ˜y for y ∈ ∂Ω and z ∈ ∂Ωǫ, we write +SΩǫ[ ˜ψ](˜x) = − 1 +4π +� +∂Ωǫ +1 +|˜x − z| +˜ψ(z)d˜σ(z) = − 1 +4π +� +∂Ω +1 +|˜x − ˜y| +˜ψ(˜y)d˜σ(˜y), +˜x ∈ ∂Ωǫ, +for any density ˜ψ ∈ L2(∂Ωǫ). For (ξ, θ), (α, β) ∈ ϑ. 
Set +x = X(ξ, θ), +˜x = ˜X(ξ, θ) = x + ǫh(ξ, θ)n(x), +y = X(α, β), +˜y = ˜X(α, β) = y + ǫh(α, β)n(y), +and hence +˜x − ˜y = x − y + ǫ +� +h(ξ, θ)n(x) − h(α, β)n(y) +� +. +This gives +|˜x − ˜y| =|x − y| +� +1 + 2ǫ⟨x − y, h(x)n(x) − h(y)n(y)⟩ +|x − y|2 ++ ǫ2 +��h(x)n(x) − h(y)n(y) +��2 +|x − y|2 +� 1 +2 +:=|x − y| +� +1 + 2ǫF(x, y) + ǫ2G(x, y) +� 1 +2 . +(3.11) +We have hν ∈ C1(∂Ω). Then, one can easily see that +|F(x, y)| + |G(x, y)| +1 +2 ≤ C∥X∥C2(∂Ω)∥h∥C1(∂Ω) +for x, y ∈ ∂Ω. +Therefore, it follows from (2.5) and (3.11) that +1 +|˜x − ˜y|d˜σ(˜y) = +1 +|x − y| +� +1 + 2ǫF(x, y) + ǫ2G(x, y) +�− 1 +2 × +� ∞ +� +k=0 +ǫkσk(y)dσ(y) +� +:= +1 +|x − y| +∞ +� +k=0 +ǫkLk(x, y)dσ(y), +(3.12) +8 + +where |Lk(x, y)| ≤ C∥X∥C2(∂Ω)∥h∥C1(∂Ω), for x, y ∈ ∂Ω. In particular +L0(x, y) = 1, +L1(x, y) = −F(x, y) − 2τ(y)h(y). +Introduce a sequence of integral operators (S(k) +Ω )k∈N, defined for any ψ ∈ L2(∂Ω) by +S(k) +Ω ψ(x) := − 1 +4π +� +∂Ω +Lk(x, y) +|x − y| ψ(y)dσ(y) +for k ≥ 0. +Note that S(0) +Ω += SΩ and +S(1) +Ω [ψ](x) = 1 +2π +� +∂Ω +1 +|x − y|τ(y)h(y)ψ(y)dσ(y) ++ h(x) +4π +� +∂Ω +⟨x − y, n(x)⟩ +|x − y|3 +ψ(y)dσ(y) − 1 +4π +� +∂Ω +⟨x − y, n(y)⟩ +|x − y|3 +h(y)ψ(y)dσ(y) += − 2SΩ[τhψ](x) + h(x)∂(SΩ[ψ]) +∂n +��� +±(x) + DΩ[hψ] +�� +±(x) +for x ∈ ∂Ω. +It is easily to prove that the operator S(k) +Ω +with the kernel − 1 +4πLk(x, y)/|x − y| is bounded +on L2(∂Ω). See [9, Proposition 3.10]. +Let ˜x = Ψǫ(x) = x + ǫh(ξ, θ)n(x) for x = X(ξ, θ) ∈ ∂Ω. The following estimate holds: +���SΩǫ[ ˜ψ] ◦ Ψǫ − SΩ[ψ] − +N +� +k=1 +ǫkS(k) +Ω [ψ] +��� +L2(∂Ω) ≤ CǫN+1��ψ +�� +L2(∂Ω), +where ψ := ˜ψ ◦ Ψǫ and C depends only on N, ∥X∥C2(∂Ω), and ∥h∥C1(∂Ω). +We have +∇x +1 +|˜x − ˜y| · T (x)d˜σ(˜y) = +∞ +� +k=0 +ǫk�⟨x − y, T (x)⟩ +|x − y|3 +Lk(x, y) + ⟨∇xLk(x, y), T (x)⟩ +|x − y| +� +dσ(y) +:= +∞ +� +k=0 +ǫkKk(x, y)dσ(y). +By looking at the ∇F(x, y) · T (x) and ∇G(x, y) · T (x), we confirm that Kk(x, y) is a com- +bination linear as following +Kk(x, y) =αk(x, y)⟨x − y, T (x)⟩ +|x − y|3 ++ βk(x, y)⟨h(x)n(x) − h(y)n(y), T (x)⟩ +|x − y|3 ++ γk(x, y)⟨x − y, n(x)⟩ +|x − y|3 ++ λk(x, y)⟨h(x)n(x) − h(y)n(y), n(x)⟩ +|x − y|3 +, +where |αk(x, y)| + |βk(x, y)| + |γk(x, y)| + |λk(x, y)| ≤ C∥X∥C2(∂Ω)∥h∥C1(∂Ω), for x, y ∈ ∂Ω. +It is easily to prove that the operator ∂S(k) +Ω /∂T with the kernel − 1 +4πKk(x, y) is bounded +on L2(∂Ω). In fact, it is an immediate consequence of the celebrate theorem of Coifman- +McIntosh-Meyer, see [7]. Therefore, the following estimate holds: +���� +∂SΩǫ[ ˜ψ] ◦ Ψǫ +∂T +− ∂SΩ[ψ] +∂T +− +N +� +k=1 +ǫk ∂S(k) +Ω [ψ] +∂T +���� +L2(∂Ω) +≤ CǫN+1��ψ +�� +L2(∂Ω). +The result of the above asymptotic analysis is summarized in the following theorem. +9 + +Theorem 3.3 There exists C depending only on ∥X∥C2(∂Ω) and ∥h∥C1(∂Ω), such that for +any ˜ψ ∈ L2(∂Ωǫ), we have +���SΩǫ[ ˜ψ] ◦ Ψǫ − SΩ[ψ] − +N +� +k=1 +ǫkS(k) +Ω [ψ] +��� +W 2 +1 (∂Ω) ≤ CǫN+1��ψ +�� +L2(∂Ω), +(3.13) +where ψ := ˜ψ ◦ Ψǫ. +For ψ ∈ L2(∂Ω), we introduce +K(1) +Ω [ψ](x) =2 +� +τh∂(SΩ[ψ]) +∂n +− ∂(SΩ[τhψ]) +∂n +� ��� +±(x) + ∂(DΩ[hψ]) +∂n +(x) +− +1 +� +det(G) +� +∇ξ,θ · +� +h +� +det(G)G−1∇ξ,θSΩ[ψ] +�� +(x), +for x ∈ ∂Ω. +It was proved in [13] that the operator K(1) +Ω +is bounded in L2(∂Ω) and the following propo- +sition holds. +Proposition 3.4 There exists C depending only on ∥X∥C2(∂Ω) and ∥h∥C1(∂Ω), such that for +any ˜ψ ∈ L2(∂Ωǫ), we have +���� +∂SΩǫ[ ˜ψ] +∂˜n +◦ Ψǫ +��� +± − ∂SΩ[ψ] +∂n +��� +± − ǫK(1) +Ω [ψ] +���� +L2(∂Ω) +≤ Cǫ2��ψ +�� +L2(∂Ω), +(3.14) +where ψ := ˜ψ ◦ Ψǫ. +3.3 +Proofs of Theorems +The following lemma is of use to us. 
+Lemma 3.5 Let f ∈ W 2 +1 (∂Ω). The solution of the following problem + + + + + + + + + +∆w = 0 +in R3\Ω, +w = f +on ∂Ω, +lim +|x|→∞ w(x) = 0, +(3.15) +is represented as +w(x) = SΩ[φ](x) − DΩ[f](x), +x ∈ R3\Ω, +φ := ∂w +∂n +��� +∂Ω, +(3.16) +where φ ∈ L2(∂Ω) satisfies the following integral equation +SΩ[φ] = (1 +2I + KΩ)[f] +on ∂Ω. +The representation formula (3.16) is unique. +10 + +Proof. Consider the following problem + + + + + + + + + + + + + + + +∆U = 0 +in R3\∂Ω, +U|+ − U|− = f +on ∂Ω, +∂U +∂n +��� ++ − ∂U +∂n +��� +− = φ +on ∂Ω, +U(x) = O(1/|x|) +as x → ∞, +(3.17) +Let U1 = SΩ[φ] − DΩ[f] in R3. It follows from (3.4) that U1(x) = O(1/|x|) and hence U1 is +a solution of (3.17) by the jump formulae (3.5)–(3.8), and (3.10). If we put U2 = w in R3\Ω +and U2 ≡ 0 in Ω, then U2 is also a solution of (3.17). Therefore, in order to prove (3.16), it +suffices to show that the problem (3.17) has a unique solution in W 1,2 +loc(R3\∂Ω). +Suppose that U ∈ W 1,2 +loc(R3\∂Ω) is a solution of (3.17) with f = φ = 0. Then U is the +weak solution of ∆U = 0 in the entire domain R3. Therefore, for a large R, +� +BR(0) +|∇U|2dx = +� +∂BR(0) +U ∂U +∂n dσ(x) = − +� +R3\BR(0) +|∇U|2dx ≤ 0, +where BR(0) = {|x| < R}. This inequality holds for all R and hence U is constant. Since +U(x) → 0 at the infinity, we conclude that U = 0. +From(3.16) and (3.8), we get SΩ[φ] = ( 1 +2I + KΩ)[f] on ∂Ω. For f ∈ W 2 +1 (∂Ω), we have +KΩ[f] ∈ W 2 +1 (∂Ω) and hence ( 1 +2I + KΩ)[f] ∈ W 2 +1 (∂Ω). It then follows from Lemma 3.1 (i) +that φ is unique and belongs to L2(∂Ω). Therefore, the representation formula (3.16) is +unique. +According to Lemmas 3.2 and 3.5, the solution u to (1.1) has the following representation +formula +u(x) = SΩ[φ0](x) − DΩ[1](x) = SΩ[φ0](x), +x in R3\Ω, +φ0 := ∂u +∂n +��� +∂Ω, +(3.18) +where φ0 satisfies the integral equation +SΩ[φ0] = 1 +on ∂Ω. +(3.19) +Similarly to (3.18), the solution uǫ to (1.4) is represented by: +uǫ(x) = SΩǫ[φǫ](x), +x in R3\Ωǫ, +(3.20) +where φǫ is the unique solution to +SΩǫ[φǫ] = 1 +on ∂Ωǫ. +(3.21) +The following Lemma holds. +Lemma 3.6 There exists C depending only on the C2-norm of X and C1-norm of h such +that +��φǫ ◦ Ψǫ − φ0 +�� +L2(∂Ω) ≤ Cǫ, +(3.22) +where φ0 and φǫ are defined in (3.19) and (3.21), respectively. +11 + +Proof. Let x ∈ ∂Ω, then ˜x = Ψǫ(x) = x + ǫh(x)n(x) ∈ ∂Ωǫ. According to (3.19) and (3.21) +we have +SΩǫ[φǫ] ◦ Ψǫ(x) = SΩ[φ0](x), +x ∈ ∂Ω. +(3.23) +It then follows from Theorem 3.3 that +SΩ[φǫ ◦ Ψǫ − φ0](x) = O(ǫ), +x ∈ ∂Ω, +with O(ǫ) is bounded in W 2 +1 (∂Ω) by Cǫ for some constant C > 0 depending only on the +C2-norm of X and C1-norm of h. Clearly the desired estimate (3.22) immediately follows +from Lemma 3.1 (i). +3.3.1 +Proof of Theorem 1.1 +Let φǫ and φ0 be the solutions of the integral equations (3.21) and (3.19), respectively. We +denote by φ := φǫ ◦ Ψǫ. Thanks to Lemma 3.6, we write +φ = φ0 + ǫφ1, +(3.24) +with φ1 is bounded in L2(∂Ω) and still depends on ǫ. Define +vǫ(x) = SΩ[φ1](x) − 2SΩ[τhφ0](x) + DΩ[hφ0](x), +x ∈ R3\Ω. +(3.25) +It follows from Proposition 3.4 and (3.24) that +���∂uǫ +∂˜n ◦ Ψǫ − ∂u +∂n − 2ǫτh∂u +∂n − ǫ∂vǫ +∂n +��� +L2(∂Ω) ≤ Cǫ2��φ +�� +L2(∂Ω). +(3.26) +Turning to Theorem 3.3 and (3.23), we confirm that +���SΩ[φ0] − SΩ[φ] − ǫS(1) +Ω [φ] +��� +W 2 +1 (∂Ω) ≤ Cǫ2��φ +�� +L2(∂Ω). +(3.27) +Substitute φ = φ0 + ǫφ1 in (3.27), we get +���h∂SΩ[φ0] +∂n +��� ++ + SΩ[φ1] − 2SΩ[τhφ0] + DΩ[hφ0] +�� ++ +��� +W 2 +1 (∂Ω) ≤ Cǫ +��φ +�� +L2(∂Ω), +(3.28) +that is, +∥vǫ − v∥W 2 +1 (∂Ω) ≤ Cǫ. 
+(3.29) +Since vǫ − v is harmonic in R3\Ω, we obtain from Lemma 3.5 that +(vǫ − v)(x) = SΩ +�∂vǫ +∂n − ∂v +∂n +� +(x) − DΩ[vǫ − v](x), +x ∈ R3\Ω, +and therefore, we deduce from (3.8) that +SΩ +�∂vǫ +∂n − ∂v +∂n +� += +�1 +2 + KΩ +� +[vǫ − v] +on ∂Ω. +(3.30) +It then follows from Lemma 3.1, (3.29), and (3.30) that +���∂vǫ +∂n − ∂v +∂n +��� +L2(∂Ω) ≤ Cǫ. +(3.31) +Finally, we prove the theorem 1.1 as desired from (3.26) and (3.31). +12 + +3.3.2 +Proof of Theorem 1.2 +It follows from Theorem 3.3, Proposition 3.4, (2.5), and (2.11) that +cap(Ωǫ) = − 1 +4π +� +∂Ω +∂SΩǫ[φǫ] +∂˜n +��� ++(˜x)SΩǫ[φǫ](˜x)d˜σ(˜x) += − 1 +4π +� +∂Ω +∂SΩ[φ] +∂n +��� ++SΩ[φ]dσ − ǫ +4π +� +∂Ω +� +− 2∂SΩ[τhφ] +∂n +��� ++ + ∂DΩ[hφ] +∂n +� +SΩ[φ]dσ +− ǫ +4π +� +∂Ω +� +− 2SΩ[τhφ] + DΩ[hφ] +�� ++ +�∂SΩ[φ] +∂n +��� ++dσ − ǫ +4π +� +∂Ω +h +�∂SΩ[φ] +∂n +��� ++ +�2 +dσ ++ ǫ +4π +� +∂Ω +1 +� +det(G) +� +∇ξ,θ · +� +h +� +det(G)G−1∇ξ,θSΩ[φ] +�� +SΩ[φ]dσ + O(ǫ2), +where φ := φǫ ◦ Ψǫ. From the decomposition of φ in (3.24), we write +cap(Ωǫ) = − 1 +4π +� +∂Ω +∂SΩ[φ0] +∂n +��� ++SΩ[φ0]dσ − ǫ +4π +� +∂Ω +h +�∂SΩ[φ0] +∂n +��� ++ +�2 +dσ +− ǫ +4π +� +∂Ω +�∂SΩ[φ1] +∂n +��� ++ − 2∂SΩ[τhφ0] +∂n +��� ++ + ∂DΩ[hφ0] +∂n +� +SΩ[φ0]dσ +− ǫ +4π +� +∂Ω +� +SΩ[φ1] − 2SΩ[τhφ0] + DΩ[hφ0] +�� ++ +�∂SΩ[φ0] +∂n +��� ++dσ ++ ǫ +4π +� +∂Ω +1 +� +det(G) +� +∇ξ,θ · +� +h +� +det(G)G−1∇ξ,θSΩ[φ0] +�� +SΩ[φ0]dσ + O(ǫ2). +Since SΩ[φ0] = 1 on ∂Ω, we get ∇ξ,θSΩ[φ0] = 0 on ∂Ω and then the last integral is equal +to zero. By using Green’s formula, we deduce that the third integral is equal to the forth +integral. Therefore +cap(Ωǫ) =cap(Ω) − ǫ +4π +� +∂Ω +h +�∂SΩ[φ0] +∂n +��� ++ +�2 +dσ +− ǫ +2π +� +∂Ω +� +SΩ[φ1] − 2SΩ[τhφ0] + DΩ[hφ0] +�� ++ +�∂SΩ[φ0] +∂n +��� ++dσ + O(ǫ2). +(3.32) +It follows from (3.28) that +� +∂Ω +� +SΩ[φ1] − 2SΩ[τhφ0] + DΩ[hφ0] +�� ++ +�∂SΩ[φ0] +∂n +��� ++dσ = − +� +∂Ω +h +�∂SΩ[φ0] +∂n +��� ++ +�2 +dσ + O(ǫ). +(3.33) +Finally, we conclude from the representation formula of u (3.18), (3.32), and (3.33) that +cap(Ωǫ) =cap(Ω) + ǫ +4π +� +∂Ω +h +�∂u +∂n +�2 +dσ + O(ǫ2). +This completes the proof of Theorem 1.2, as desired. +13 + +3.3.3 +Proof of Theorem 1.3 +By (3.20) and (3.18), we have +uǫ(x) − u(x) = − 1 +4π +� +∂Ω +Γ(x − ˜y)∂uǫ +∂˜n (˜y)d˜σ(˜y) + 1 +4π +� +∂Ω +Γ(x − y)∂u +∂n(y)dσ(y). +It then follows from Theorem 1.1, (2.5), and (3.3) that +uǫ(x) − u(x) = − ǫ +4π +� +∂Ω +Γ(x − ˜y) ∂v +∂n(y)dσ(y) +− 1 +4π +� +∂Ω +� +Γ(x − ˜y) − Γ(x − y) +�∂u +∂n(y)dσ(y) + O( ǫ2 +|x|). +Since +Γ(x − ˜y) = 1 +|x| + O( 1 +|x|2 ), +Γ(x − ˜y) − Γ(x − y) = O( ǫ +|x|2 ) +as |x| → +∞. +Therefore +uǫ(x) − u(x) = − +ǫ +4π|x| +� +∂Ω +∂v +∂n(y)dσ(y) + O( ǫ +|x|2 ) + O( ǫ2 +|x|). +According to the Green’s formula, we immediately see that +� +∂Ω +∂v +∂ndσ = +� +∂Ω +∂v +∂nudσ = +� +∂Ω +v ∂u +∂ndσ = − +� +∂Ω +h +�∂u +∂n +�2 +dσ. +This completes the proof of Theorem 1.3. +4 +Sensitivity analysis of the first eigenvector of the op- +erator K∗ +Ω with respect to small perturbations in the +surface of its domain +Let Ω be a bounded domain in R3 with C2-boundary ∂Ω. The spectrum of K∗ +Ω : L2(∂Ω) → +L2(∂Ω) is discrete, lies in the interval (− 1 +2, 1 +2], and accumulates at zero. More precisely, let +{λj}∞ +0 be the eigenvalues of K∗ +Ω on L2(∂Ω), then, the first eigenvalue λ0 is equal to 1/2 and +has geometric multiplicity 1 while λj ∈ (− 1 +2, 1 +2) for j ≥ 1 with |λ1| ≥ |λ2| ≥ · · · → 0 as +j → ∞ arranged repeatedly according to their multiplicities. See for example [3]. 
+Denote by ϕ0 the first eigenvector of K∗ +Ω on L2(∂Ω) associated to the first eigenvalue 1/2 +with ∥ϕ0∥L2(∂Ω) = 1. We claim that ϕ0 is equal to ∂u +∂n/∥ ∂u +∂n∥L2(∂Ω), where u represents the +electrostatic potential in the presence of the conductor Ω in electrostatic equilibrium, it is +the unique solution of (1.1). In fact, it follows from (3.18) and (3.7) that +∂u +∂n = +�1 +2I + K∗ +Ω +��∂u +∂n +� +on ∂Ω, +namely, +K∗ +Ω +�∂u +∂n +� += 1 +2 +∂u +∂n +on ∂Ω. +(4.1) +14 + +From the uniqueness of ϕ0, we deduce that ϕ0 = ∂u +∂n/∥ ∂u +∂n∥L2(∂Ω) on ∂Ω. +It is known that the first eigenvalue 1/2 is independent of ∂Ω, that is, it does not affected +by any smooth perturbations of ∂Ω. In view of this remark, the electrostatic capacity of an +isolated conductor may also be defined as the amount of a charge required to raise the first +eigenvalue of the K∗ +Ω operator at 1/2. +Similarly to (4.1), we have +K∗ +Ωǫ +�∂uǫ +∂˜n +� += 1 +2 +∂uǫ +∂˜n +on ∂Ωǫ, +where uǫ is the unique solution of (1.4). Therefore the first eigenvector ϕǫ +0 of the operator +K∗ +∂Ωǫ on L2(∂Ωǫ) with the eigenvalue 1/2 is equal to ∂uǫ +∂˜n /∥ ∂uǫ +∂˜n ∥L2(∂Ωǫ). +From Theorem 1.1, we obtain in the following theorem the fourth result of this paper, +an asymptotic expansion for the first eigenvector ϕǫ +0 on ∂Ωǫ as ǫ → 0. +Theorem 4.1 Let ˜x = x + ǫh(x)n(x) ∈ ∂Ωǫ, for x ∈ ∂Ω. +Let ϕǫ +0 and ϕ0 be the first +eigenvectors of K∗ +Ωǫ and K∗ +Ω with the eigenvalue 1/2, respectively. The following asymptotic +expansion holds: +ϕǫ +0(˜x) =ϕ0(x) + 2ǫτ(x)h(x)ϕ0(x) + ǫ˜v(x) − ǫ⟨τhϕ0 + ˜v, ϕ0⟩ϕ0(x) + O(ǫ2), +with ˜v = +∂v +∂n/∥ ∂u +∂n∥L2(∂Ω), where u and v are the unique solutions of (1.1) and (1.8), +respectively, and the remainder O(ǫ2) depends only on the C2-norm of X and the C1-norm +of h. +The fifth result of this paper is the following theorem, an asymptotic expansion of +� +∂Ω +� +ϕǫ +0(˜x) − ϕ0(x) +� +ϕ0(x)dσ(x) as ǫ → 0. +Theorem 4.2 Let ϕǫ +0 and ϕ0 be the first eigenvectors of K∗ +Ωǫ and K∗ +Ω with the eigenvalue +1/2, respectively. The following asymptotic expansion holds: +� +∂Ω +� +ϕǫ +0(˜x) − ϕ0(x) +� +ϕ0(x)dσ(x) =ǫ +� +∂Ω +τ(x)h(x)[ϕ0(x)]2dσ(x) + O(ǫ2), +(4.2) +where the remainder O(ǫ2) depends only on the C2-norm of X and the C1-norm of h. +The asymptotic expansion (4.2) could be used to determine some properties on the shape +perturbation of an object from measurements on the perturbed shape itself (see [24]) of the +first eigenvector of the L2-adjoint of the NP operator. +References +[1] H. Ammari, An Introduction to Mathematics of Emerging Biomedical Imaging, Math. +Appl., Volume 62, Springer, Berlin, 2008. +[2] H. Ammari, E. Beretta, E. Francini, H. Kang, and M. Lim, Optimization algorithm for +reconstruction interface changes of a conductivity inclusion from modal measurements, +Math. comp., 79 (2010), 1757-1777. +[3] H. Ammari, B. Fitzpatrick, H. Kang, M. Ruiz, S. Yu, and H. Zhang, Mathematical +and Computational Methods in Photonics and Phononics, Mathematical Surveys and +Monographs, Volume 235, American Mathematical Society, Providence, 2018. +15 + +[4] H. Ammari and H. Kang Polarization and Moment Tensors with Applications to In- +verse Problems and Effective Medium Theory, Applied Mathematical Sciences, Vol. 162, +SpringerVerlag, New York, 2007. +[5] H. Ammari, H. Kang, M. Lim, and H. Zribi, Conductivity interface problems. Part I: +small perturbations of an interface, Trans. Amer. Math. Soc., 362 (2010), 2435-2449. +[6] H. Ammari, H. Kang, M. Lim, and H. 
Zribi, The generalized polarization tensors for +resolved imaging. Part I: shape reconstruction of a conductiovity inclusion, Math. of +comp., 81 (2012), 367-386. +[7] R.R. Coifman, A. McIntosh, and Y. Meyer, L’int´egrale de Cauchy d´efinit un op´erateur +born´e sur L2 pour les courbes lipschitziennes, Ann. Math., 116 (1982), 361–387. +[8] R. Coifman, M. Goldberg, T. Hrycak, M. Israeli, and V. Rokhlin, An improved operator +expantion algorithm for direct and inverse scattering computations, Waves Random +Media, 9 (1999), 441-457. +[9] G.B. Folland, Introduction to Partial Differential Equations, Princeton University +Press, Princeton, New Jersey, 1976. +[10] D. Jerison, A Minkowski problem for electrostatic capacity, Acta Math., 176 (1996) +1-47. +[11] O.D. Kellogg, Foundations of Potential Theory, Dover, New York, 1953. +[12] A. Khelifi and H. Zribi, Asymptotic expansions for the voltage potentials with two- and +three-dimensional thin interfaces, Math. Methods. Appl. Sci., 34 (2011), 2274-2290. +[13] A. Khelifi and H. Zribi, Boundary voltage perturbations resulting from small surface +changes of a conductivity inclusion, Appl. Anal., Vol. 93 (2014), 46-64. +[14] J. Lagha, F. Triki, and H. Zribi, Small perturbations of an interface for elastostatic +problems, Math. Methods. Appl. Sci., 40 (10)(2017), 3608-3636. +[15] J. Lagha and H. Zribi, An asymptotic expansion for perturbations in the displacement +field due to the presence of thin interfaces, Appl. Anal., volume 1, (2017), 1-23. +[16] M. Lim, K. Louati and H. Zribi, Reconstructing small perturbations of scatterers from +electric or acoustic far-field measurements, Math. Methods. Appl. Sci., 31 (2008), no +11, 1315-1332. +[17] S. J. Ling, J. Sanny, and W. Moebs, University Physics - Volume 2 (OpenStax), (2016). +[18] J. C. Maxwell, An elementary treatise on electricity, Clarendon Press in Oxford, 1881. +[19] H. Poincar´e, Figures d’´equilibre d’une masse fluide, Paris, 1902. +[20] G. Polya, Estimating electrostatic capacity, Am. Math. Mon., 54, no. 4 (1947), 201–206. +[21] G. P´olya and G. Szeg¨o, Isoperimetric Inequalities in Mathematical Physics, Annals of +Mathematical Studies, Number 27, Princeton University Press, Princeton, NJ, 1951. +16 + +[22] G. Szeg¨o, On the capacity of a condenser, Bull. Amer. Math. Soc. vol. 51 (1945) pp. +325-350. +[23] H. Zribi, Asymptotic expansions for currents caused by small interface changes of an +electromagnetic inclusion, Appl. Anal., 92, (2013), 172-190. +[24] H. Zribi, Reconstructing small perturbations of an obstacle for acoustic waves from +boundary measurements on the perturbed shape itself, Math. Methods. Appl. Sci., 45 +(2022), no 1, 93-112. +17 + diff --git a/VdAyT4oBgHgl3EQf8vrJ/content/tmp_files/load_file.txt b/VdAyT4oBgHgl3EQf8vrJ/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..17aca42ae169aa381609cceef19b7cf3f3046604 --- /dev/null +++ b/VdAyT4oBgHgl3EQf8vrJ/content/tmp_files/load_file.txt @@ -0,0 +1,552 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf,len=551 +page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='00863v1 [math.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='AP] 2 Jan 2023 Sensitivity Analysis of the Current, the Electrostatic Capacity, and the Far-Field of the Potential with Respect to Small Perturbations in the Surface of a Conductor Jihene Lagha ∗ Habib Zribi† Abstract We derive asymptotic expansions of the current, the electrostatic capacity, and the far-field of the electrostatic potential resulting from small perturbations in the shape of an isolated conductor with C2-surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' Our derivation is rigorous by using systematic way, based on layer potential techniques and the field expansion (FE) method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' We then use these results to study the sensibility analysis of the first eigenvector of the L2-adjoint of the Neumann-Poincar´e (NP) operator with respect to small perturbations in the surface of its domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' Mathematics Subject Classification (MSC2000): 35B30, 35B40 Keywords: Isolated conductor, electrostatic capacity, small surface perturbations, boundary integral method, field expansion method, Laplace equation, Neumann-Poincar´e operator type 1 Introduction and statement of the main results Suppose that an isolated conductor occupies a bounded domain Ω in R3, with a connected C2-surface ∂Ω.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' A conductor is a volume which contains free charges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' In the presence of an external electric field, each free charge in the conductor redistributes and very quickly reaches electrostatic equilibrium (see [17, 18]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' The free charges are redistributed in such a way the electric field inside the conductor vanishes and the electrical filed is always perpen- dicular everywhere on the surface of the conductor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' The electrostatic potential is constant throughout the volume of the conductor and has the same value on its surface, let’s say 1 volt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' More precisely, let u be the electrostatic potential in the presence of a conductor Ω at equilibrium in R3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' It is the unique solution of the following problem \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f3 ∆u = 0 in R3\\Ω, u = 1 on ∂Ω, lim |x|→∞ u(x) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' (1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='1) ∗Universit´e de Tunis El Manar, Facult´e des Sciences de Tunis, LR11ES13 Laboratoire d’Analyse stochas- tique et Applications, 2092 Tunis, Tunisie (lagha.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='jihene@yahoo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='fr) †Department of Mathematics, College of Science, University of Hafr Al Batin, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' 1803, Hafr Al Batin 31991, Saudi Arabia (zribi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='habib@yahoo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='fr) 1 The electrostatic capacity with respect to infinity of the conductor Ω, denoted cap(Ω), is defined as the ratio of the charge in equilibrium on it to the value of the potential u at its surface ∂Ω.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' That is, the capacity is the charge producing this potential which is given by Gauss’ integral (see for instance [19, 21, 11]) cap(Ω) = − 1 4π � ∂Ω ∂u ∂n(x)dσ(x) � = − 1 4π � ∂Ω ∂u ∂n(x)u(x)dσ(x) � , (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='2) where n and dσ are the unit outward normal and the length element to the boundary ∂Ω, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' The electrostatic capacity cap(Ω) may also be defined as the quantity of electrical charge which must be given to the conductor Ω to raise its potential to the value unity, it depends on its own form and size;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' being greater as the seize increased.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' In this work, we investigate the sensitivity analysis of the electrostatic capacity with small changes in the form of its domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' It is well-known that 0 ≤ cap(Ω) < +∞, which can be easily proved by applying the Green’s identity to the integral in (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='2) over the unbounded domain R3\\Ω.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' The electrostatic capacity can be determined from the far-field of the electrostatic potential u defined in (1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' In fact, in [20, 10], u has the asymptotic expansion u(x) = cap(Ω) |x| + O( 1 |x|2 ) as |x| → ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='3) Therefore, in order to find the electrostatic capacity, we have to pick out the coefficient of 1/|x| in the asymptotic expansion (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' The capacities are known analytically for a few simple shapes like sphere, ellipsoid, lens, spindle, and anchor-ring.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' See [22, 21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' Let Ωǫ be an ǫ-perturbation of Ω, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=', there is a function h ∈ C1(∂Ω) such that ∂Ωǫ is given by ∂Ωǫ = � ˜x = x + ǫh(x)n(x) := Ψǫ(x)|x ∈ ∂Ω � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' We denote by uǫ the perturbed electrostatic potential in the presence of the conductor Ωǫ in electrostatic equilibrium.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' It is the unique solution of the following problem \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f3 ∆uǫ = 0 in R3\\Ωǫ, uǫ = 1 on ∂Ωǫ, lim |x|→∞ uǫ(x) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='4) The perturbed electrostatic capacity with respect to infinity of the conductor Ωǫ is given by cap(Ωǫ) = − 1 4π � ∂Ωǫ ∂uǫ ∂˜n (y)d˜σ(y), (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='5) where ˜n and d˜σ are the unit outward normal and the length element to the boundary ∂Ωǫ, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' Similarly to (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='3), uǫ satisfies uǫ(x) = cap(Ωǫ) |x| + O( 1 |x|2 ) as |x| → ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' (1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='6) Our goal is to find asymptotic expansions for the current, the electrostatic capacity, and the far-field of the electrostatic potential resulting from small perturbations on the surface 2 of a conductor at equilibrium in free space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' The main idea is to adopt the FE method to derive formal asymptotic expansions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' Then, based on layer potential techniques, we prove rigorously those asymptotic expansions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' In connection with this, we refer to recent works in the same context [13, 23, 24, 16, 5, 6, 14, 12, 15].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' The first achievement of this paper is the following theorem, a rigorous derivation of the asymptotic expansion of the perturbed current ∂uǫ/∂˜n on ∂Ωǫ as ǫ → 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='1 Let ˜x = x + ǫh(x)n(x) ∈ ∂Ωǫ, for x ∈ ∂Ω.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' Let uǫ and u be the solutions of (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='4) and (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='1), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' The following asymptotic expansion holds: ∂uǫ ∂˜n (˜x) = ∂u ∂n(x) + ǫ � 2τ(x)h(x)∂u ∂n(x) + ∂v ∂n(x) � + O(ǫ2), (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='7) where the remainder O(ǫ2) depends only on the C2-norm of X and the C1-norm of h, τ is the mean curvature of Ω, and v is the unique solution to \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 ∆v = 0 in R3\\Ω, v = −h∂u ∂n on ∂Ω, lim |x|→∞ v(x) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='8) The second result of this paper is the following theorem, we rigorously derive the asymp- totic expansion of cap(Ωǫ) as ǫ → 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' Theorem 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='2 The following asymptotic expansion holds: cap(Ωǫ) = cap(Ω) + ǫ 4π � ∂Ω h �∂u ∂n �2 dσ + O(ǫ2), (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='9) where the remainder O(ǫ2) depends only on the C2-norm of X and the C1-norm of h.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' It is worth noticing that if h has a constant sign on ∂Ω that there exists ǫ0 > 0 such that for ǫ < ǫ0, cap(Ωǫ) − cap(Ω) has the same sign as h.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' The following theorem represents the third result of this paper, a rigorous derivation of the asymptotic expansion of the far-field of the electrostatic potential uǫ as ǫ → 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='3 Let uǫ and u be the solutions of (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='4) and (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='1), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' We have the following asymptotic expansion uǫ(x) = u(x) + ǫ 4π|x| � ∂Ω h �∂u ∂n �2 dσ + O � ǫ2 |x| � + O � ǫ |x|2 � (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content='10) as |x| → ∞ and ǫ → 0, where the remainders O(ǫ2/|x|) and O(ǫ/|x|2) depend only on the C2-norm of X, the C1-norm of h, and the distance(origin, Ω).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' These asymptotic expansions had not been established before this work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' They can be taken into account in the design of conductors to avoid negative effects due to small changes in their shapes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' Our asymptotic expansions are still valid in the case of small perturbations of one of the walls of a condenser in electrostatic equilibrium, the FE method works well, but more elaborate arguments are needed for the layer potential techniques method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VdAyT4oBgHgl3EQf8vrJ/content/2301.00863v1.pdf'} +page_content=' 3 The asymptotic expansions can be used to design effective algorithms to recover certain properties of the perturbation of the shape of an isolated conductor.' 
The third result of this paper is the following theorem, a rigorous derivation of the asymptotic expansion of the far field of the electrostatic potential u_ε as ε → 0.

Theorem 1.3. Let u_ε and u be the solutions of (1.4) and (1.1), respectively. The following asymptotic expansion holds:
\[
u_\epsilon(x) = u(x) + \frac{\epsilon}{4\pi|x|}\int_{\partial\Omega} h\left(\frac{\partial u}{\partial n}\right)^{2} d\sigma + O\!\left(\frac{\epsilon^2}{|x|}\right) + O\!\left(\frac{\epsilon}{|x|^2}\right) \tag{1.10}
\]
as |x| → ∞ and ε → 0, where the remainders O(ε²/|x|) and O(ε/|x|²) depend only on the C²-norm of X, the C¹-norm of h, and the distance from the origin to Ω.

These asymptotic expansions had not been established before this work. They can be taken into account in the design of conductors, to avoid adverse effects caused by small changes in their shapes. The expansions remain valid for small perturbations of one of the walls of a condenser in electrostatic equilibrium: the FE method carries over directly, while the layer potential techniques require more elaborate arguments. The asymptotic expansions can also be used to design effective algorithms that recover certain properties of the perturbation of the shape of an isolated conductor. That is, we would like to find a method for determining the shape of a conductor from one measurement, or a combination of measurements, of the current, the electrostatic capacity, and the electrostatic potential. One possible approach is to extend the optimization method of [2] to electrostatic capacity measurements.

This article is organized as follows. In the next section we formally derive the asymptotic expansions (1.7), (1.9), and (1.10) by the FE method. In Section 3, based on layer potential techniques, we prove rigorously that these formal expansions hold. In the last section, we derive an asymptotic expansion of the first eigenvector of the L²-adjoint of the NP operator resulting from small perturbations of the surface of its domain.

2 Formal derivations: the FE method

We refer to [13] for further details on the following concepts and definitions. Suppose ∂Ω has an orthogonal parametrization X(ξ, θ), that is, there is an open subset ϑ of R² such that
\[
\partial\Omega := \bigl\{\, x = X(\xi,\theta),\ (\xi,\theta)\in\vartheta \,\bigr\},
\]
where X is a C²-function satisfying X_ξ · X_θ = 0 (with X_ξ := dX/dξ and X_θ := dX/dθ) and X_{ξθ} = X_{θξ}. We point out that a surface of revolution has an orthogonal parametrization. The vectors T_ξ = X_ξ/|X_ξ| and T_θ = X_θ/|X_θ| form an orthonormal basis of the tangent plane to ∂Ω at x = X(ξ, θ), and the tangential derivative on ∂Ω is defined by
\[
\frac{\partial}{\partial T} = \frac{\partial}{\partial T_\xi}\,T_\xi + \frac{\partial}{\partial T_\theta}\,T_\theta .
\]
We denote by G the matrix of the first fundamental form with respect to the basis {X_ξ, X_θ}.
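For concreteness (our example, not the paper's), the unit sphere admits the orthogonal parametrization
\[
X(\xi,\theta) = (\sin\xi\cos\theta,\ \sin\xi\sin\theta,\ \cos\xi),\qquad (\xi,\theta)\in(0,\pi)\times(0,2\pi),
\]
for which X_ξ · X_θ = 0, |X_ξ| = 1, |X_θ| = sin ξ, and hence
\[
G = \begin{pmatrix} 1 & 0\\ 0 & \sin^{2}\xi \end{pmatrix},\qquad \sqrt{\det G} = \sin\xi .
\]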
For w ∈ C¹(ϑ), the gradient operator in local coordinates satisfies
\[
\nabla_{\xi,\theta} w = \sqrt{G_{11}}\,\frac{\partial w}{\partial T_\xi}\,T_\xi + \sqrt{G_{22}}\,\frac{\partial w}{\partial T_\theta}\,T_\theta = G^{\frac12}\,\frac{\partial w}{\partial T}. \tag{2.1}
\]
For w ∈ C²(ϑ), the restriction of ∆ in R³\∂Ω to a neighbourhood of ∂Ω can be expressed as
\[
\Delta w = \frac{\partial^2 w}{\partial n^2} - 2\tau\,\frac{\partial w}{\partial n} + \Delta_G w, \tag{2.2}
\]
where ∆_G is the Laplace–Beltrami operator associated with G, given by
\[
\Delta_G w = \frac{1}{\sqrt{\det G}}\,\nabla_{\xi,\theta}\cdot\bigl(\sqrt{\det G}\,G^{-1}\nabla_{\xi,\theta} w\bigr). \tag{2.3}
\]
We write h(ξ, θ) for h(X(ξ, θ)), and h_ξ(ξ, θ), h_θ(ξ, θ) for the tangential derivatives of h(X(ξ, θ)). Then x̃ = X(ξ, θ) + εh(ξ, θ)n(x) is a parametrization of ∂Ω_ε.

The following asymptotic expansions of the outward unit normal ñ(x̃) and of the length element dσ̃(x̃) hold; for proofs, see [13].

Lemma 2.1. Let x̃ = x + εh(x)n(x) ∈ ∂Ω_ε, for x ∈ ∂Ω. Then the outward unit normal ñ(x̃) to ∂Ω_ε at x̃ can be expanded uniformly as
\[
\tilde n(\tilde x) := \frac{\tilde X_\xi \wedge \tilde X_\theta}{|\tilde X_\xi \wedge \tilde X_\theta|} = \sum_{k=0}^{\infty} \epsilon^k n_k(x), \tag{2.4}
\]
where the vector-valued functions n_k are uniformly bounded with respect to k; in particular, for x ∈ ∂Ω,
\[
n_0(x) = n(x), \qquad n_1(x) = -\frac{\partial h}{\partial T}(x)\,T(x).
\]
Likewise, the length element dσ̃(x̃) admits the uniform expansion
\[
d\tilde\sigma(\tilde x) := |\tilde X_\xi \wedge \tilde X_\theta|\,d\xi\,d\theta = \frac{|\tilde X_\xi \wedge \tilde X_\theta|}{\sqrt{\det G}}\,d\sigma(x) = \sum_{k=0}^{\infty} \epsilon^k \sigma^{(k)}(x)\,d\sigma(x), \tag{2.5}
\]
where the σ^{(k)} are uniformly bounded with respect to k, with σ^{(0)}(x) = 1 and σ^{(1)}(x) = −2h(x)τ(x).
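Continuing the sphere example above (added for illustration), inserting G = diag(1, sin²ξ) into (2.3) recovers the familiar spherical Laplace–Beltrami operator,
\[
\Delta_G w = \frac{1}{\sin\xi}\,\frac{\partial}{\partial\xi}\!\left(\sin\xi\,\frac{\partial w}{\partial\xi}\right) + \frac{1}{\sin^{2}\xi}\,\frac{\partial^{2} w}{\partial\theta^{2}} .
\]
For the uniformly dilated ball used in the earlier check (h ≡ 1), (2.5) gives dσ̃ = (1 + 2ε + O(ε²))dσ, consistent with the exact value (1+ε)² dσ and with τ ≡ −1 on the unit sphere under this convention.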
Let u_ε be the solution to (1.4). In order to derive a formal asymptotic expansion of u_ε, we apply the FE method; see [24, 8, 13, 16, 23]. First, we expand u_ε in powers of ε,
\[
u_\epsilon(x) = u_0(x) + \epsilon u_1(x) + \epsilon^2 u_2(x) + \cdots, \qquad x \in \mathbb{R}^3\setminus\Omega_\epsilon, \tag{2.6}
\]
where the u_l are defined on R³\∂Ω. The functions u_l satisfy
\[
\begin{cases}
\Delta u_l = 0 & \text{in } \mathbb{R}^3\setminus\Omega,\\
\lim_{|x|\to\infty} u_l(x) = 0.
\end{cases} \tag{2.7}
\]
To justify the first equation in (2.7), we substitute (2.6) into the Laplace equation ∆u_ε = 0 in R³\Ω_ε and obtain ∆u_l = 0 in R³\Ω_ε for every ε > 0; since ε is arbitrary, we conclude that ∆u_l = 0 in R³\Ω.

Let x̃ = x + εh(x)n(x) ∈ ∂Ω_ε, for x ∈ ∂Ω. The following Taylor expansion holds:
\[
\begin{aligned}
\frac{\partial u_\epsilon}{\partial\tilde n}(\tilde x)
&= \Bigl(\nabla u_0\bigl(x+\epsilon h(x)n(x)\bigr) + \epsilon\nabla u_1\bigl(x+\epsilon h(x)n(x)\bigr)\Bigr)\cdot\tilde n(\tilde x) + O(\epsilon^2)\\
&= \Bigl(\nabla u_0(x) + \epsilon h(x)\nabla^2 u_0(x)n(x) + \epsilon\nabla u_1(x)\Bigr)\cdot\Bigl(n(x) - \epsilon\,\frac{\partial h}{\partial T}(x)T(x)\Bigr) + O(\epsilon^2)\\
&= \frac{\partial u_0}{\partial n}(x) + \epsilon h(x)\frac{\partial^2 u_0}{\partial n^2}(x) + \epsilon\frac{\partial u_1}{\partial n}(x) - \epsilon\,\frac{\partial h}{\partial T}(x)\cdot\frac{\partial u_0}{\partial T}(x) + O(\epsilon^2)\\
&= \frac{\partial u_0}{\partial n}(x) + 2\epsilon\tau(x)h(x)\frac{\partial u_0}{\partial n}(x) + \epsilon\frac{\partial u_1}{\partial n}(x) - \epsilon\,\frac{\partial h}{\partial T}(x)\cdot\frac{\partial u_0}{\partial T}(x) - \epsilon h(x)\Delta_G u_0(x) + O(\epsilon^2).
\end{aligned} \tag{2.8}
\]
To justify the last equality, we use the representation (2.2) of the Laplace operator near ∂Ω:
\[
0 = \Delta u_0 = \frac{\partial^2 u_0}{\partial n^2} - 2\tau\,\frac{\partial u_0}{\partial n} + \Delta_G u_0 \quad \text{on } \partial\Omega.
\]
For x̃ = x + εh(x)n(x) ∈ ∂Ω_ε we also have the Taylor expansion
\[
u_\epsilon(\tilde x) = u_0\bigl(x+\epsilon h(x)n(x)\bigr) + \epsilon u_1\bigl(x+\epsilon h(x)n(x)\bigr) + O(\epsilon^2)
= u_0(x) + \epsilon h(x)\frac{\partial u_0}{\partial n}(x) + \epsilon u_1(x) + O(\epsilon^2). \tag{2.9}
\]
Using the boundary condition u_ε(x̃) = 1 for x̃ = Ψ_ε(x) ∈ ∂Ω_ε, we obtain from (2.9) that
\[
u_0(x) = 1, \qquad u_1(x) = -h(x)\frac{\partial u_0}{\partial n}(x), \qquad x \in \partial\Omega. \tag{2.10}
\]
By the uniqueness of the solutions of the PDE problems (1.1) and (1.8), we get u_0 ≡ u and u_1 ≡ v in R³\Ω. The fourth and fifth terms in (2.8) vanish because u = 1 on ∂Ω, which implies that ∂u/∂T = 0 on ∂Ω. Theorem 1.1 then follows, formally, from (2.8).

A change of variables y = Ψ_ε(x), x ∈ ∂Ω, in (1.5) gives
\[
\operatorname{cap}(\Omega_\epsilon) = -\frac{1}{4\pi}\int_{\partial\Omega} \frac{\partial u_\epsilon}{\partial\tilde n}(\tilde x)\,u_\epsilon(\tilde x)\,d\tilde\sigma(\tilde x). \tag{2.11}
\]
It then follows from (2.5), (2.8), and (2.9) that
\[
\operatorname{cap}(\Omega_\epsilon) = -\frac{1}{4\pi}\int_{\partial\Omega} \frac{\partial u}{\partial n}\,u\,d\sigma
- \frac{\epsilon}{4\pi}\int_{\partial\Omega} h\left(\frac{\partial u}{\partial n}\right)^{2} d\sigma
- \frac{\epsilon}{4\pi}\int_{\partial\Omega} \frac{\partial v}{\partial n}\,u\,d\sigma
- \frac{\epsilon}{4\pi}\int_{\partial\Omega} \frac{\partial u}{\partial n}\,v\,d\sigma + O(\epsilon^2). \tag{2.12}
\]
By Green's identity and (2.7), we have
\[
\int_{\partial\Omega} \frac{\partial v}{\partial n}\,u\,d\sigma = \int_{\partial\Omega} \frac{\partial u}{\partial n}\,v\,d\sigma,
\]
and we get from (2.10) that
\[
\int_{\partial\Omega} \frac{\partial v}{\partial n}\,u\,d\sigma + \int_{\partial\Omega} \frac{\partial u}{\partial n}\,v\,d\sigma
= 2\int_{\partial\Omega} \frac{\partial u}{\partial n}\,v\,d\sigma
= -2\int_{\partial\Omega} h\left(\frac{\partial u}{\partial n}\right)^{2} d\sigma. \tag{2.13}
\]
Thus, by (1.2), (2.12), and (2.13), we formally obtain Theorem 1.2, i.e.,
\[
\operatorname{cap}(\Omega_\epsilon) = \operatorname{cap}(\Omega) + \frac{\epsilon}{4\pi}\int_{\partial\Omega} h\left(\frac{\partial u}{\partial n}\right)^{2} d\sigma + O(\epsilon^2). \tag{2.14}
\]
As a direct consequence of (1.3), (1.6), and (2.14), the leading-order term in the asymptotic expansion of the far field u_ε − u in Theorem 1.3 holds formally:
\[
u_\epsilon(x) - u(x) = \frac{\epsilon}{4\pi|x|}\int_{\partial\Omega} h\left(\frac{\partial u}{\partial n}\right)^{2} d\sigma + O\!\left(\frac{\epsilon^2}{|x|}\right) + O\!\left(\frac{1}{|x|^2}\right) \tag{2.15}
\]
as ε → 0 and ε ≫ 1/|x|.
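As a quick consistency check of (2.14)–(2.15) (ours, not in the original): for the uniformly dilated unit ball (h ≡ 1) one has u_ε(x) = (1+ε)/|x|, so u_ε − u = ε/|x| exactly, while (1/4π)∫_{∂Ω} h (∂u/∂n)² dσ = 1. The right-hand sides of (2.14) and (2.15) then give cap(Ω_ε) = 1 + ε + O(ε²) and u_ε(x) − u(x) = ε/|x| + ···, matching the exact values cap(Ω_ε) = 1 + ε and ε/|x|.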
Using the layer potential techniques we will prove, in Subsection 3.3.3, the asymptotic expansion (2.15) with a remainder O(ε²/|x|) + O(ε/|x|²) as ε → 0 and |x| → ∞, which is sharper than O(ε²/|x|) + O(1/|x|²) as ε → 0 and ε ≫ 1/|x|.

3 Layer potential techniques

3.1 Definitions and preliminary results

Let Ω be a bounded C²-domain, and let Γ(x) be the fundamental solution of the Laplacian ∆ in R³:
\[
\Gamma(x) = -\frac{1}{4\pi|x|}.
\]
The single and double layer potentials of a density function φ on ∂Ω are defined by
\[
S_\Omega[\varphi](x) = \int_{\partial\Omega} \Gamma(x-y)\,\varphi(y)\,d\sigma(y), \qquad x \in \mathbb{R}^3, \tag{3.1}
\]
\[
D_\Omega[\varphi](x) = \int_{\partial\Omega} \frac{\partial}{\partial n(y)}\Gamma(x-y)\,\varphi(y)\,d\sigma(y), \qquad x \in \mathbb{R}^3\setminus\partial\Omega. \tag{3.2}
\]
We note that for x ∈ R³\∂Ω and y ∈ ∂Ω, Γ(x − y) and ∂Γ(x − y)/∂n(y) are L∞-functions in y, harmonic in x, and their behaviour as |x| → +∞ is given by
\[
\Gamma(x-y) = O\!\left(\frac{1}{|x|}\right), \qquad \frac{\partial}{\partial n(y)}\Gamma(x-y) = O\!\left(\frac{1}{|x|^2}\right). \tag{3.3}
\]
Therefore S_Ω[φ] and D_Ω[φ] are well defined and harmonic in R³\∂Ω, and satisfy
\[
S_\Omega[\varphi](x) = O\!\left(\frac{1}{|x|}\right), \qquad D_\Omega[\varphi](x) = O\!\left(\frac{1}{|x|^2}\right), \qquad \text{as } |x| \to +\infty. \tag{3.4}
\]
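A standard concrete instance (added here for illustration): for Ω the unit ball and constant density φ ≡ 1, the integral in (3.1) can be evaluated explicitly,
\[
S_\Omega[1](x) = \begin{cases} -\dfrac{1}{|x|}, & |x| \ge 1,\\[4pt] -1, & |x| \le 1, \end{cases}
\]
which is harmonic off ∂Ω, continuous across ∂Ω (cf. (3.5) below), and decays like 1/|x|, in agreement with (3.4). It also shows that the equilibrium density of the unit ball is φ₀ ≡ −1, since S_Ω[−1] = 1 on ∂Ω; this reappears in (3.18)–(3.19) below.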
We denote ∂w/∂n|± = n · ∇w±|_{∂Ω}, where w₊ = w|_{R³\Ω} and w₋ = w|_Ω. The following formulae give the jump relations obeyed by the double layer potential and by the derivatives of the single layer potential; for proofs, see [9, 1]. For φ ∈ L²(∂Ω),
\[
S_\Omega[\varphi]\big|_{+}(x) = S_\Omega[\varphi]\big|_{-}(x) \quad \text{a.e. } x \in \partial\Omega, \tag{3.5}
\]
\[
\frac{\partial(S_\Omega[\varphi])}{\partial T}\Big|_{+}(x) = \frac{\partial(S_\Omega[\varphi])}{\partial T}\Big|_{-}(x) \quad \text{a.e. } x \in \partial\Omega, \tag{3.6}
\]
\[
\frac{\partial(S_\Omega[\varphi])}{\partial n}\Big|_{\pm}(x) = \Bigl(\pm\tfrac12 I + K_\Omega^{*}\Bigr)[\varphi](x) \quad \text{a.e. } x \in \partial\Omega, \tag{3.7}
\]
\[
D_\Omega[\varphi]\big|_{\pm}(x) = \Bigl(\mp\tfrac12 I + K_\Omega\Bigr)[\varphi](x) \quad \text{a.e. } x \in \partial\Omega, \tag{3.8}
\]
where K_Ω is the NP operator defined by
\[
K_\Omega[\varphi](x) = \frac{1}{4\pi}\int_{\partial\Omega} \frac{\langle y-x,\,n(y)\rangle}{|x-y|^{3}}\,\varphi(y)\,d\sigma(y),
\]
and K*_Ω is its L²-adjoint, that is,
\[
K_\Omega^{*}[\varphi](x) = \frac{1}{4\pi}\int_{\partial\Omega} \frac{\langle x-y,\,n(x)\rangle}{|x-y|^{3}}\,\varphi(y)\,d\sigma(y). \tag{3.9}
\]
The operators K_Ω and K*_Ω are singular integral operators and are bounded on L²(∂Ω). Because Ω has a C² boundary, ∂(D_Ω[φ])/∂n has no jump across ∂Ω, that is,
\[
\frac{\partial(D_\Omega[\varphi])}{\partial n}\Big|_{+}(x) = \frac{\partial(D_\Omega[\varphi])}{\partial n}\Big|_{-}(x), \qquad x \in \partial\Omega. \tag{3.10}
\]
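For orientation (a classical example, not part of the paper's text): on the unit sphere one has n(y) = y, so the kernels in (3.9) coincide and K_Ω = K*_Ω; moreover the spherical harmonics diagonalize the operator,
\[
K_\Omega^{*}[Y_n^m] = \frac{1}{2(2n+1)}\,Y_n^m, \qquad n \ge 0,
\]
so the spectrum is {1/(2(2n+1)) : n ≥ 0}, with largest eigenvalue 1/2 attained by the constant density. This is precisely the eigenvalue whose eigenvector is perturbed in Section 4 below.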
Let W²₁(∂Ω) := {f ∈ L²(∂Ω) : ∂f/∂T ∈ L²(∂Ω)}. The following lemma is of importance to us; for a proof, see for example [4].

Lemma 3.1. Let Ω be a bounded Lipschitz domain in R³. Then
(i) S_Ω : L²(∂Ω) → W²₁(∂Ω) has a bounded inverse;
(ii) K_Ω : W²₁(∂Ω) → W²₁(∂Ω) is a bounded operator.

We will also need the following lemma, which was obtained in [9]; see also [1].

Lemma 3.2. Let Ω be a bounded C²-domain. Then D_Ω[1](x) = 0 for x ∈ R³\Ω, D_Ω[1](x) = 1 for x ∈ Ω, and K_Ω[1](x) = 1/2 for x ∈ ∂Ω.

3.2 Asymptotics of layer potentials

Using the change of variables z = Ψ_ε(y) = ỹ for y ∈ ∂Ω and z ∈ ∂Ω_ε, we write, for any density ψ̃ ∈ L²(∂Ω_ε),
\[
S_{\Omega_\epsilon}[\tilde\psi](\tilde x) = -\frac{1}{4\pi}\int_{\partial\Omega_\epsilon} \frac{1}{|\tilde x - z|}\,\tilde\psi(z)\,d\tilde\sigma(z)
= -\frac{1}{4\pi}\int_{\partial\Omega} \frac{1}{|\tilde x - \tilde y|}\,\tilde\psi(\tilde y)\,d\tilde\sigma(\tilde y), \qquad \tilde x \in \partial\Omega_\epsilon.
\]
For (ξ, θ), (α, β) ∈ ϑ, set x = X(ξ, θ), x̃ = X̃(ξ, θ) = x + εh(ξ, θ)n(x), y = X(α, β), and ỹ = X̃(α, β) = y + εh(α, β)n(y), so that x̃ − ỹ = x − y + ε(h(ξ, θ)n(x) − h(α, β)n(y)). This gives
\[
|\tilde x - \tilde y| = |x-y|\left(1 + 2\epsilon\,\frac{\langle x-y,\ h(x)n(x)-h(y)n(y)\rangle}{|x-y|^{2}} + \epsilon^{2}\,\frac{\bigl|h(x)n(x)-h(y)n(y)\bigr|^{2}}{|x-y|^{2}}\right)^{\frac12}
:= |x-y|\bigl(1 + 2\epsilon F(x,y) + \epsilon^{2} G(x,y)\bigr)^{\frac12}. \tag{3.11}
\]
Since hn ∈ C¹(∂Ω), one easily sees that |F(x, y)| + |G(x, y)|^{1/2} ≤ C‖X‖_{C²(∂Ω)}‖h‖_{C¹(∂Ω)} for x, y ∈ ∂Ω.
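Spelling out the first-order step (added for readability): expanding the square root in (3.11) and multiplying by the expansion (2.5) of the surface element gives
\[
\frac{1}{|\tilde x - \tilde y|}\,d\tilde\sigma(\tilde y)
= \frac{1}{|x-y|}\bigl(1 - \epsilon F(x,y) + O(\epsilon^{2})\bigr)\bigl(1 - 2\epsilon\,\tau(y)h(y) + O(\epsilon^{2})\bigr)\,d\sigma(y),
\]
which already exhibits the first two kernels, L₀ = 1 and L₁ = −F − 2τh, appearing in (3.12) below.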
Therefore, it follows from (2.5) and (3.11) that
\[
\frac{1}{|\tilde x - \tilde y|}\,d\tilde\sigma(\tilde y)
= \frac{1}{|x-y|}\bigl(1 + 2\epsilon F(x,y) + \epsilon^{2}G(x,y)\bigr)^{-\frac12}\left(\sum_{k=0}^{\infty}\epsilon^{k}\sigma^{(k)}(y)\,d\sigma(y)\right)
:= \frac{1}{|x-y|}\sum_{k=0}^{\infty}\epsilon^{k}L_k(x,y)\,d\sigma(y), \tag{3.12}
\]
where |L_k(x, y)| ≤ C‖X‖_{C²(∂Ω)}‖h‖_{C¹(∂Ω)} for x, y ∈ ∂Ω; in particular,
\[
L_0(x,y) = 1, \qquad L_1(x,y) = -F(x,y) - 2\tau(y)h(y).
\]
Introduce the sequence of integral operators (S^{(k)}_Ω)_{k∈N}, defined for any ψ ∈ L²(∂Ω) by
\[
S^{(k)}_\Omega[\psi](x) := -\frac{1}{4\pi}\int_{\partial\Omega} \frac{L_k(x,y)}{|x-y|}\,\psi(y)\,d\sigma(y), \qquad k \ge 0.
\]
Note that S^{(0)}_Ω = S_Ω and
\[
\begin{aligned}
S^{(1)}_\Omega[\psi](x) &= \frac{1}{2\pi}\int_{\partial\Omega} \frac{\tau(y)h(y)\psi(y)}{|x-y|}\,d\sigma(y)
+ \frac{h(x)}{4\pi}\int_{\partial\Omega} \frac{\langle x-y,\,n(x)\rangle}{|x-y|^{3}}\,\psi(y)\,d\sigma(y)
- \frac{1}{4\pi}\int_{\partial\Omega} \frac{\langle x-y,\,n(y)\rangle}{|x-y|^{3}}\,h(y)\psi(y)\,d\sigma(y)\\
&= -2S_\Omega[\tau h\psi](x) + h(x)\,\frac{\partial(S_\Omega[\psi])}{\partial n}\Big|_{\pm}(x) + D_\Omega[h\psi]\big|_{\pm}(x), \qquad x \in \partial\Omega.
\end{aligned}
\]
It is easy to prove that the operator S^{(k)}_Ω, with kernel −(1/4π)L_k(x, y)/|x − y|, is bounded on L²(∂Ω); see [9, Proposition 3.10]. Let x̃ = Ψ_ε(x) = x + εh(ξ, θ)n(x) for x = X(ξ, θ) ∈ ∂Ω. The following estimate holds:
\[
\Bigl\| S_{\Omega_\epsilon}[\tilde\psi]\circ\Psi_\epsilon - S_\Omega[\psi] - \sum_{k=1}^{N}\epsilon^{k}S^{(k)}_\Omega[\psi] \Bigr\|_{L^{2}(\partial\Omega)} \le C\epsilon^{N+1}\|\psi\|_{L^{2}(\partial\Omega)},
\]
where ψ := ψ̃ ∘ Ψ_ε and C depends only on N, ‖X‖_{C²(∂Ω)}, and ‖h‖_{C¹(∂Ω)}. We also have
\[
\nabla_x\frac{1}{|\tilde x - \tilde y|}\cdot T(x)\,d\tilde\sigma(\tilde y)
= \sum_{k=0}^{\infty}\epsilon^{k}\left(\frac{\langle x-y,\,T(x)\rangle}{|x-y|^{3}}\,L_k(x,y) + \frac{\langle \nabla_x L_k(x,y),\,T(x)\rangle}{|x-y|}\right)d\sigma(y)
:= \sum_{k=0}^{\infty}\epsilon^{k}K_k(x,y)\,d\sigma(y).
\]
By examining ∇F(x, y) · T(x) and ∇G(x, y) · T(x), we see that K_k(x, y) is a linear combination of the form
\[
K_k(x,y) = \alpha_k(x,y)\,\frac{\langle x-y,\,T(x)\rangle}{|x-y|^{3}}
+ \beta_k(x,y)\,\frac{\langle h(x)n(x)-h(y)n(y),\,T(x)\rangle}{|x-y|^{3}}
+ \gamma_k(x,y)\,\frac{\langle x-y,\,n(x)\rangle}{|x-y|^{3}}
+ \lambda_k(x,y)\,\frac{\langle h(x)n(x)-h(y)n(y),\,n(x)\rangle}{|x-y|^{3}},
\]
where |α_k(x, y)| + |β_k(x, y)| + |γ_k(x, y)| + |λ_k(x, y)| ≤ C‖X‖_{C²(∂Ω)}‖h‖_{C¹(∂Ω)} for x, y ∈ ∂Ω. It is easy to prove that the operator ∂S^{(k)}_Ω/∂T, with kernel −(1/4π)K_k(x, y), is bounded on L²(∂Ω).
Indeed, this is an immediate consequence of the celebrated theorem of Coifman–McIntosh–Meyer; see [7]. Therefore the following estimate holds:
\[
\Bigl\| \frac{\partial\bigl(S_{\Omega_\epsilon}[\tilde\psi]\circ\Psi_\epsilon\bigr)}{\partial T} - \frac{\partial\bigl(S_\Omega[\psi]\bigr)}{\partial T} - \sum_{k=1}^{N}\epsilon^{k}\,\frac{\partial\bigl(S^{(k)}_\Omega[\psi]\bigr)}{\partial T} \Bigr\|_{L^{2}(\partial\Omega)} \le C\epsilon^{N+1}\|\psi\|_{L^{2}(\partial\Omega)}.
\]
The result of the above asymptotic analysis is summarized in the following theorem.

Theorem 3.3. There exists C, depending only on ‖X‖_{C²(∂Ω)} and ‖h‖_{C¹(∂Ω)}, such that for any ψ̃ ∈ L²(∂Ω_ε),
\[
\Bigl\| S_{\Omega_\epsilon}[\tilde\psi]\circ\Psi_\epsilon - S_\Omega[\psi] - \sum_{k=1}^{N}\epsilon^{k}S^{(k)}_\Omega[\psi] \Bigr\|_{W^{2}_{1}(\partial\Omega)} \le C\epsilon^{N+1}\|\psi\|_{L^{2}(\partial\Omega)}, \tag{3.13}
\]
where ψ := ψ̃ ∘ Ψ_ε.

For ψ ∈ L²(∂Ω), we introduce
\[
K^{(1)}_\Omega[\psi](x) = 2\left(\tau h\,\frac{\partial(S_\Omega[\psi])}{\partial n} - \frac{\partial(S_\Omega[\tau h\psi])}{\partial n}\right)\Big|_{\pm}(x)
+ \frac{\partial(D_\Omega[h\psi])}{\partial n}(x)
- \frac{1}{\sqrt{\det G}}\,\Bigl(\nabla_{\xi,\theta}\cdot\bigl(h\sqrt{\det G}\,G^{-1}\nabla_{\xi,\theta}S_\Omega[\psi]\bigr)\Bigr)(x), \qquad x \in \partial\Omega.
\]
It was proved in [13] that the operator K^{(1)}_Ω is bounded on L²(∂Ω) and that the following proposition holds.

Proposition 3.4. There exists C, depending only on ‖X‖_{C²(∂Ω)} and ‖h‖_{C¹(∂Ω)}, such that for any ψ̃ ∈ L²(∂Ω_ε),
\[
\Bigl\| \frac{\partial S_{\Omega_\epsilon}[\tilde\psi]}{\partial\tilde n}\circ\Psi_\epsilon\Big|_{\pm} - \frac{\partial S_\Omega[\psi]}{\partial n}\Big|_{\pm} - \epsilon K^{(1)}_\Omega[\psi] \Bigr\|_{L^{2}(\partial\Omega)} \le C\epsilon^{2}\|\psi\|_{L^{2}(\partial\Omega)}, \tag{3.14}
\]
where ψ := ψ̃ ∘ Ψ_ε.

3.3 Proofs of the Theorems

The following lemma is of use to us.

Lemma 3.5. Let f ∈ W²₁(∂Ω). The solution of the problem
\[
\begin{cases}
\Delta w = 0 & \text{in } \mathbb{R}^3\setminus\Omega,\\
w = f & \text{on } \partial\Omega,\\
\lim_{|x|\to\infty} w(x) = 0,
\end{cases} \tag{3.15}
\]
is represented as
\[
w(x) = S_\Omega[\varphi](x) - D_\Omega[f](x), \qquad x \in \mathbb{R}^3\setminus\Omega, \qquad \varphi := \frac{\partial w}{\partial n}\Big|_{\partial\Omega}, \tag{3.16}
\]
where φ ∈ L²(∂Ω) satisfies the integral equation
\[
S_\Omega[\varphi] = \Bigl(\tfrac12 I + K_\Omega\Bigr)[f] \quad \text{on } \partial\Omega.
\]
The representation formula (3.16) is unique.

Proof. Consider the problem
\[
\begin{cases}
\Delta U = 0 & \text{in } \mathbb{R}^3\setminus\partial\Omega,\\
U|_{+} - U|_{-} = f & \text{on } \partial\Omega,\\
\dfrac{\partial U}{\partial n}\Big|_{+} - \dfrac{\partial U}{\partial n}\Big|_{-} = \varphi & \text{on } \partial\Omega,\\
U(x) = O(1/|x|) & \text{as } |x| \to \infty.
\end{cases} \tag{3.17}
\]
Let U₁ = S_Ω[φ] − D_Ω[f] in R³. It follows from (3.4) that U₁(x) = O(1/|x|), and hence U₁ is a solution of (3.17) by the jump formulae (3.5)–(3.8) and (3.10). If we put U₂ = w in R³\Ω and U₂ ≡ 0 in Ω, then U₂ is also a solution of (3.17). Therefore, in order to prove (3.16), it suffices to show that problem (3.17) has a unique solution in W^{1,2}_{loc}(R³\∂Ω).
Suppose that U ∈ W^{1,2}_{loc}(R³\∂Ω) is a solution of (3.17) with f = φ = 0. Then U is a weak solution of ∆U = 0 in the entire space R³. Therefore, for large R,
\[
\int_{B_R(0)} |\nabla U|^{2}\,dx = \int_{\partial B_R(0)} U\,\frac{\partial U}{\partial n}\,d\sigma(x) = -\int_{\mathbb{R}^3\setminus B_R(0)} |\nabla U|^{2}\,dx \le 0,
\]
where B_R(0) = {|x| < R}. This inequality holds for all R, and hence U is constant; since U(x) → 0 at infinity, we conclude that U = 0.

From (3.16) and (3.8), we get S_Ω[φ] = (½ I + K_Ω)[f] on ∂Ω. For f ∈ W²₁(∂Ω) we have K_Ω[f] ∈ W²₁(∂Ω), and hence (½ I + K_Ω)[f] ∈ W²₁(∂Ω). It then follows from Lemma 3.1 (i) that φ is unique and belongs to L²(∂Ω). Therefore the representation formula (3.16) is unique.

According to Lemmas 3.2 and 3.5, the solution u of (1.1) has the representation
\[
u(x) = S_\Omega[\varphi_0](x) - D_\Omega[1](x) = S_\Omega[\varphi_0](x), \qquad x \in \mathbb{R}^3\setminus\Omega, \qquad \varphi_0 := \frac{\partial u}{\partial n}\Big|_{\partial\Omega}, \tag{3.18}
\]
where φ₀ satisfies the integral equation
\[
S_\Omega[\varphi_0] = 1 \quad \text{on } \partial\Omega. \tag{3.19}
\]
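To make (3.18)–(3.19) concrete (an added example): for Ω the unit ball, the single layer potential of a constant density was recorded after (3.4), and S_Ω[−1] = 1 on ∂Ω, so φ₀ ≡ −1 and (3.18) gives
\[
u(x) = S_\Omega[-1](x) = \frac{1}{|x|}, \qquad |x| \ge 1,
\]
recovering the equilibrium potential of the unit sphere, with φ₀ = ∂u/∂n|_{∂Ω} = −1 as required.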
Similarly to (3.18), the solution u_ε of (1.4) is represented by
\[
u_\epsilon(x) = S_{\Omega_\epsilon}[\varphi_\epsilon](x), \qquad x \in \mathbb{R}^3\setminus\Omega_\epsilon, \tag{3.20}
\]
where φ_ε is the unique solution of
\[
S_{\Omega_\epsilon}[\varphi_\epsilon] = 1 \quad \text{on } \partial\Omega_\epsilon. \tag{3.21}
\]
The following lemma holds.

Lemma 3.6. There exists C, depending only on the C²-norm of X and the C¹-norm of h, such that
\[
\|\varphi_\epsilon\circ\Psi_\epsilon - \varphi_0\|_{L^{2}(\partial\Omega)} \le C\epsilon, \tag{3.22}
\]
where φ₀ and φ_ε are defined by (3.19) and (3.21), respectively.

Proof. Let x ∈ ∂Ω; then x̃ = Ψ_ε(x) = x + εh(x)n(x) ∈ ∂Ω_ε. According to (3.19) and (3.21) we have
\[
S_{\Omega_\epsilon}[\varphi_\epsilon]\circ\Psi_\epsilon(x) = S_\Omega[\varphi_0](x), \qquad x \in \partial\Omega. \tag{3.23}
\]
It then follows from Theorem 3.3 that S_Ω[φ_ε ∘ Ψ_ε − φ₀](x) = O(ε) for x ∈ ∂Ω, where the O(ε) term is bounded in W²₁(∂Ω) by Cε for some constant C > 0 depending only on the C²-norm of X and the C¹-norm of h. The desired estimate (3.22) then follows immediately from Lemma 3.1 (i).
3.3.1 Proof of Theorem 1.1

Let φ_ε and φ₀ be the solutions of the integral equations (3.21) and (3.19), respectively, and write φ := φ_ε ∘ Ψ_ε. Thanks to Lemma 3.6, we may write
\[
\varphi = \varphi_0 + \epsilon\varphi_1, \tag{3.24}
\]
where φ₁ is bounded in L²(∂Ω) (and still depends on ε). Define
\[
v_\epsilon(x) = S_\Omega[\varphi_1](x) - 2S_\Omega[\tau h\varphi_0](x) + D_\Omega[h\varphi_0](x), \qquad x \in \mathbb{R}^3\setminus\Omega. \tag{3.25}
\]
It follows from Proposition 3.4 and (3.24) that
\[
\Bigl\| \frac{\partial u_\epsilon}{\partial\tilde n}\circ\Psi_\epsilon - \frac{\partial u}{\partial n} - 2\epsilon\tau h\,\frac{\partial u}{\partial n} - \epsilon\,\frac{\partial v_\epsilon}{\partial n} \Bigr\|_{L^{2}(\partial\Omega)} \le C\epsilon^{2}\|\varphi\|_{L^{2}(\partial\Omega)}. \tag{3.26}
\]
Turning to Theorem 3.3 and (3.23), we see that
\[
\bigl\| S_\Omega[\varphi_0] - S_\Omega[\varphi] - \epsilon S^{(1)}_\Omega[\varphi] \bigr\|_{W^{2}_{1}(\partial\Omega)} \le C\epsilon^{2}\|\varphi\|_{L^{2}(\partial\Omega)}. \tag{3.27}
\]
Substituting φ = φ₀ + εφ₁ into (3.27), we get
\[
\Bigl\| h\,\frac{\partial S_\Omega[\varphi_0]}{\partial n}\Big|_{+} + \Bigl( S_\Omega[\varphi_1] - 2S_\Omega[\tau h\varphi_0] + D_\Omega[h\varphi_0] \Bigr)\Big|_{+} \Bigr\|_{W^{2}_{1}(\partial\Omega)} \le C\epsilon\,\|\varphi\|_{L^{2}(\partial\Omega)}, \tag{3.28}
\]
that is, since h ∂S_Ω[φ₀]/∂n|₊ = h ∂u/∂n = −v on ∂Ω while the remaining terms are exactly v_ε|₊,
\[
\|v_\epsilon - v\|_{W^{2}_{1}(\partial\Omega)} \le C\epsilon. \tag{3.29}
\]
Since v_ε − v is harmonic in R³\Ω, we obtain from Lemma 3.5 that
\[
(v_\epsilon - v)(x) = S_\Omega\Bigl[\frac{\partial v_\epsilon}{\partial n} - \frac{\partial v}{\partial n}\Bigr](x) - D_\Omega[v_\epsilon - v](x), \qquad x \in \mathbb{R}^3\setminus\Omega,
\]
and therefore we deduce from (3.8) that
\[
S_\Omega\Bigl[\frac{\partial v_\epsilon}{\partial n} - \frac{\partial v}{\partial n}\Bigr] = \Bigl(\tfrac12 I + K_\Omega\Bigr)[v_\epsilon - v] \quad \text{on } \partial\Omega. \tag{3.30}
\]
It then follows from Lemma 3.1, (3.29), and (3.30) that
\[
\Bigl\|\frac{\partial v_\epsilon}{\partial n} - \frac{\partial v}{\partial n}\Bigr\|_{L^{2}(\partial\Omega)} \le C\epsilon. \tag{3.31}
\]
Finally, Theorem 1.1 follows, as desired, from (3.26) and (3.31).

3.3.2 Proof of Theorem 1.2
It follows from Theorem 3.3, Proposition 3.4, (2.5), and (2.11) that
\[
\begin{aligned}
\operatorname{cap}(\Omega_\epsilon) &= -\frac{1}{4\pi}\int_{\partial\Omega} \frac{\partial S_{\Omega_\epsilon}[\varphi_\epsilon]}{\partial\tilde n}\Big|_{+}(\tilde x)\, S_{\Omega_\epsilon}[\varphi_\epsilon](\tilde x)\,d\tilde\sigma(\tilde x)\\
&= -\frac{1}{4\pi}\int_{\partial\Omega} \frac{\partial S_\Omega[\varphi]}{\partial n}\Big|_{+} S_\Omega[\varphi]\,d\sigma
- \frac{\epsilon}{4\pi}\int_{\partial\Omega} \Bigl(-2\,\frac{\partial S_\Omega[\tau h\varphi]}{\partial n}\Big|_{+} + \frac{\partial D_\Omega[h\varphi]}{\partial n}\Bigr) S_\Omega[\varphi]\,d\sigma\\
&\quad - \frac{\epsilon}{4\pi}\int_{\partial\Omega} \Bigl(-2S_\Omega[\tau h\varphi] + D_\Omega[h\varphi]\Bigr)\Big|_{+}\, \frac{\partial S_\Omega[\varphi]}{\partial n}\Big|_{+}\,d\sigma
- \frac{\epsilon}{4\pi}\int_{\partial\Omega} h\Bigl(\frac{\partial S_\Omega[\varphi]}{\partial n}\Big|_{+}\Bigr)^{2} d\sigma\\
&\quad + \frac{\epsilon}{4\pi}\int_{\partial\Omega} \frac{1}{\sqrt{\det G}}\Bigl(\nabla_{\xi,\theta}\cdot\bigl(h\sqrt{\det G}\,G^{-1}\nabla_{\xi,\theta}S_\Omega[\varphi]\bigr)\Bigr) S_\Omega[\varphi]\,d\sigma + O(\epsilon^{2}),
\end{aligned}
\]
where φ := φ_ε ∘ Ψ_ε. Using the decomposition (3.24) of φ, we write
\[
\begin{aligned}
\operatorname{cap}(\Omega_\epsilon) &= -\frac{1}{4\pi}\int_{\partial\Omega} \frac{\partial S_\Omega[\varphi_0]}{\partial n}\Big|_{+} S_\Omega[\varphi_0]\,d\sigma
- \frac{\epsilon}{4\pi}\int_{\partial\Omega} h\Bigl(\frac{\partial S_\Omega[\varphi_0]}{\partial n}\Big|_{+}\Bigr)^{2} d\sigma\\
&\quad - \frac{\epsilon}{4\pi}\int_{\partial\Omega} \Bigl(\frac{\partial S_\Omega[\varphi_1]}{\partial n}\Big|_{+} - 2\,\frac{\partial S_\Omega[\tau h\varphi_0]}{\partial n}\Big|_{+} + \frac{\partial D_\Omega[h\varphi_0]}{\partial n}\Bigr) S_\Omega[\varphi_0]\,d\sigma\\
&\quad - \frac{\epsilon}{4\pi}\int_{\partial\Omega} \Bigl(S_\Omega[\varphi_1] - 2S_\Omega[\tau h\varphi_0] + D_\Omega[h\varphi_0]\Bigr)\Big|_{+}\, \frac{\partial S_\Omega[\varphi_0]}{\partial n}\Big|_{+}\,d\sigma\\
&\quad + \frac{\epsilon}{4\pi}\int_{\partial\Omega} \frac{1}{\sqrt{\det G}}\Bigl(\nabla_{\xi,\theta}\cdot\bigl(h\sqrt{\det G}\,G^{-1}\nabla_{\xi,\theta}S_\Omega[\varphi_0]\bigr)\Bigr) S_\Omega[\varphi_0]\,d\sigma + O(\epsilon^{2}).
\end{aligned}
\]
Since S_Ω[φ₀] = 1 on ∂Ω, we get ∇_{ξ,θ}S_Ω[φ₀] = 0 on ∂Ω, so the last integral vanishes. Using Green's formula, we deduce that the third integral is equal to the fourth. Therefore
\[
\operatorname{cap}(\Omega_\epsilon) = \operatorname{cap}(\Omega)
- \frac{\epsilon}{4\pi}\int_{\partial\Omega} h\Bigl(\frac{\partial S_\Omega[\varphi_0]}{\partial n}\Big|_{+}\Bigr)^{2} d\sigma
- \frac{\epsilon}{2\pi}\int_{\partial\Omega} \Bigl(S_\Omega[\varphi_1] - 2S_\Omega[\tau h\varphi_0] + D_\Omega[h\varphi_0]\Bigr)\Big|_{+}\, \frac{\partial S_\Omega[\varphi_0]}{\partial n}\Big|_{+}\,d\sigma + O(\epsilon^{2}). \tag{3.32}
\]
It follows from (3.28) that
\[
\int_{\partial\Omega} \Bigl(S_\Omega[\varphi_1] - 2S_\Omega[\tau h\varphi_0] + D_\Omega[h\varphi_0]\Bigr)\Big|_{+}\, \frac{\partial S_\Omega[\varphi_0]}{\partial n}\Big|_{+}\,d\sigma
= -\int_{\partial\Omega} h\Bigl(\frac{\partial S_\Omega[\varphi_0]}{\partial n}\Big|_{+}\Bigr)^{2} d\sigma + O(\epsilon). \tag{3.33}
\]
Finally, we conclude from the representation formula (3.18) of u, together with (3.32) and (3.33), that
\[
\operatorname{cap}(\Omega_\epsilon) = \operatorname{cap}(\Omega) + \frac{\epsilon}{4\pi}\int_{\partial\Omega} h\left(\frac{\partial u}{\partial n}\right)^{2} d\sigma + O(\epsilon^{2}).
\]
This completes the proof of Theorem 1.2, as desired.

3.3.3 Proof of Theorem 1.3

By (3.20) and (3.18), we have
\[
u_\epsilon(x) - u(x) = \int_{\partial\Omega} \Gamma(x-\tilde y)\,\frac{\partial u_\epsilon}{\partial\tilde n}(\tilde y)\,d\tilde\sigma(\tilde y) - \int_{\partial\Omega} \Gamma(x-y)\,\frac{\partial u}{\partial n}(y)\,d\sigma(y).
\]
It then follows from Theorem 1.1, (2.5), and (3.3) that
\[
u_\epsilon(x) - u(x) = \epsilon\int_{\partial\Omega} \Gamma(x-\tilde y)\,\frac{\partial v}{\partial n}(y)\,d\sigma(y)
+ \int_{\partial\Omega} \bigl(\Gamma(x-\tilde y) - \Gamma(x-y)\bigr)\frac{\partial u}{\partial n}(y)\,d\sigma(y) + O\!\left(\frac{\epsilon^{2}}{|x|}\right).
\]
Since
\[
\Gamma(x-\tilde y) = -\frac{1}{4\pi|x|} + O\!\left(\frac{1}{|x|^{2}}\right), \qquad \Gamma(x-\tilde y) - \Gamma(x-y) = O\!\left(\frac{\epsilon}{|x|^{2}}\right) \quad \text{as } |x| \to +\infty,
\]
we obtain
\[
u_\epsilon(x) - u(x) = -\frac{\epsilon}{4\pi|x|}\int_{\partial\Omega} \frac{\partial v}{\partial n}(y)\,d\sigma(y) + O\!\left(\frac{\epsilon}{|x|^{2}}\right) + O\!\left(\frac{\epsilon^{2}}{|x|}\right).
\]
By Green's formula, we immediately see that
\[
\int_{\partial\Omega} \frac{\partial v}{\partial n}\,d\sigma = \int_{\partial\Omega} \frac{\partial v}{\partial n}\,u\,d\sigma = \int_{\partial\Omega} v\,\frac{\partial u}{\partial n}\,d\sigma = -\int_{\partial\Omega} h\left(\frac{\partial u}{\partial n}\right)^{2} d\sigma.
\]
This completes the proof of Theorem 1.3.

4 Sensitivity analysis of the first eigenvector of the operator K*_Ω with respect to small perturbations of the surface of its domain

Let Ω be a bounded domain in R³ with C²-boundary ∂Ω.
4 Sensitivity analysis of the first eigenvector of the operator $K^*_\Omega$ with respect to small perturbations in the surface of its domain

Let $\Omega$ be a bounded domain in $\mathbb{R}^3$ with $C^2$-boundary $\partial\Omega$. The spectrum of $K^*_\Omega: L^2(\partial\Omega)\to L^2(\partial\Omega)$ is discrete, lies in the interval $(-\frac12,\frac12]$, and accumulates at zero. More precisely, let $\{\lambda_j\}_{j\ge 0}$ be the eigenvalues of $K^*_\Omega$ on $L^2(\partial\Omega)$, arranged repeatedly according to their multiplicities; then the first eigenvalue $\lambda_0$ is equal to $1/2$ and has geometric multiplicity $1$, while $\lambda_j\in(-\frac12,\frac12)$ for $j\ge 1$, with $|\lambda_1|\ge|\lambda_2|\ge\cdots\to 0$ as $j\to\infty$. See, for example, [3]. Denote by $\varphi_0$ the first eigenvector of $K^*_\Omega$ on $L^2(\partial\Omega)$ associated with the first eigenvalue $1/2$, normalized so that $\|\varphi_0\|_{L^2(\partial\Omega)}=1$. We claim that $\varphi_0$ is equal to $\frac{\partial u}{\partial n}\big/\big\|\frac{\partial u}{\partial n}\big\|_{L^2(\partial\Omega)}$, where $u$ represents the electrostatic potential in the presence of the conductor $\Omega$ in electrostatic equilibrium; it is the unique solution of (1.1). In fact, it follows from (3.18) and (3.7) that
\[
\frac{\partial u}{\partial n}=\Big(\frac12 I+K^*_\Omega\Big)\Big[\frac{\partial u}{\partial n}\Big]\ \text{on }\partial\Omega,
\qquad\text{namely,}\qquad
K^*_\Omega\Big[\frac{\partial u}{\partial n}\Big]=\frac12\,\frac{\partial u}{\partial n}\ \text{on }\partial\Omega.
\tag{4.1}
\]
From the uniqueness of $\varphi_0$, we deduce that $\varphi_0=\frac{\partial u}{\partial n}\big/\big\|\frac{\partial u}{\partial n}\big\|_{L^2(\partial\Omega)}$ on $\partial\Omega$. It is known that the first eigenvalue $1/2$ is independent of $\partial\Omega$, that is, it is not affected by any smooth perturbation of $\partial\Omega$. In view of this remark, the electrostatic capacity of an isolated conductor may also be defined as the amount of charge required to raise the first eigenvalue of the operator $K^*_\Omega$ to $1/2$. Similarly to (4.1), we have
\[
K^*_{\Omega_\epsilon}\Big[\frac{\partial u_\epsilon}{\partial\tilde n}\Big]=\frac12\,\frac{\partial u_\epsilon}{\partial\tilde n}\ \text{on }\partial\Omega_\epsilon,
\]
where $u_\epsilon$ is the unique solution of (1.4).
Therefore, the first eigenvector $\varphi^\epsilon_0$ of the operator $K^*_{\Omega_\epsilon}$ on $L^2(\partial\Omega_\epsilon)$ associated with the eigenvalue $1/2$ is equal to $\frac{\partial u_\epsilon}{\partial\tilde n}\big/\big\|\frac{\partial u_\epsilon}{\partial\tilde n}\big\|_{L^2(\partial\Omega_\epsilon)}$. From Theorem 1.1, we obtain in the following theorem the fourth result of this paper, an asymptotic expansion of the first eigenvector $\varphi^\epsilon_0$ on $\partial\Omega_\epsilon$ as $\epsilon\to 0$.

Theorem 4.1 Let $\tilde x=x+\epsilon h(x)n(x)\in\partial\Omega_\epsilon$, for $x\in\partial\Omega$. Let $\varphi^\epsilon_0$ and $\varphi_0$ be the first eigenvectors of $K^*_{\Omega_\epsilon}$ and $K^*_\Omega$ associated with the eigenvalue $1/2$, respectively. The following asymptotic expansion holds:
\[
\varphi^\epsilon_0(\tilde x)=\varphi_0(x)+2\epsilon\,\tau(x)h(x)\varphi_0(x)+\epsilon\,\tilde v(x)
-\epsilon\,\langle\tau h\varphi_0+\tilde v,\varphi_0\rangle\,\varphi_0(x)+O(\epsilon^2),
\]
with $\tilde v=\frac{\partial v}{\partial n}\big/\big\|\frac{\partial u}{\partial n}\big\|_{L^2(\partial\Omega)}$, where $u$ and $v$ are the unique solutions of (1.1) and (1.8), respectively, and the remainder $O(\epsilon^2)$ depends only on the $C^2$-norm of $X$ and the $C^1$-norm of $h$.

The fifth result of this paper is the following theorem, an asymptotic expansion of $\int_{\partial\Omega}\big(\varphi^\epsilon_0(\tilde x)-\varphi_0(x)\big)\varphi_0(x)\,d\sigma(x)$ as $\epsilon\to 0$.

Theorem 4.2 Let $\varphi^\epsilon_0$ and $\varphi_0$ be the first eigenvectors of $K^*_{\Omega_\epsilon}$ and $K^*_\Omega$ associated with the eigenvalue $1/2$, respectively. The following asymptotic expansion holds:
\[
\int_{\partial\Omega}\big(\varphi^\epsilon_0(\tilde x)-\varphi_0(x)\big)\varphi_0(x)\,d\sigma(x)
=\epsilon\int_{\partial\Omega}\tau(x)h(x)[\varphi_0(x)]^2\,d\sigma(x)+O(\epsilon^2),
\tag{4.2}
\]
where the remainder $O(\epsilon^2)$ depends only on the $C^2$-norm of $X$ and the $C^1$-norm of $h$.

The asymptotic expansion
(4.2) could be used to determine some properties of the shape perturbation of an object from measurements, taken on the perturbed shape itself (see [24]), of the first eigenvector of the $L^2$-adjoint of the NP operator.

References

[1] H. Ammari, An Introduction to Mathematics of Emerging Biomedical Imaging, Math. Appl., Volume 62, Springer, Berlin, 2008.
[2] H. Ammari, E. Beretta, E. Francini, H. Kang, and M. Lim, Optimization algorithm for reconstructing interface changes of a conductivity inclusion from modal measurements, Math. Comp., 79 (2010), 1757-1777.
[3] H. Ammari, B. Fitzpatrick, H. Kang, M. Ruiz, S. Yu, and H. Zhang, Mathematical and Computational Methods in Photonics and Phononics, Mathematical Surveys and Monographs, Volume 235, American Mathematical Society, Providence, 2018.
[4] H. Ammari and H. Kang, Polarization and Moment Tensors with Applications to Inverse Problems and Effective Medium Theory, Applied Mathematical Sciences, Vol. 162, Springer-Verlag, New York, 2007.
[5] H. Ammari, H. Kang, M. Lim, and H. Zribi, Conductivity interface problems. Part I: small perturbations of an interface, Trans. Amer. Math. Soc., 362 (2010), 2435-2449.
[6] H. Ammari, H. Kang, M. Lim, and H. Zribi, The generalized polarization tensors for resolved imaging. Part I: shape reconstruction of a conductivity inclusion, Math. Comp., 81 (2012), 367-386.
[7] R. R. Coifman, A. McIntosh, and Y. Meyer, L'intégrale de Cauchy définit un opérateur borné sur L² pour les courbes lipschitziennes, Ann. Math., 116 (1982), 361-387.
[8] R. Coifman, M. Goldberg, T. Hrycak, M. Israeli, and V. Rokhlin, An improved operator expansion algorithm for direct and inverse scattering computations, Waves Random Media, 9 (1999), 441-457.
[9] G. B. Folland, Introduction to Partial Differential Equations, Princeton University Press, Princeton, New Jersey, 1976.
[10] D. Jerison, A Minkowski problem for electrostatic capacity, Acta Math., 176 (1996), 1-47.
[11] O. D.
Kellogg, Foundations of Potential Theory, Dover, New York, 1953.
[12] A. Khelifi and H. Zribi, Asymptotic expansions for the voltage potentials with two- and three-dimensional thin interfaces, Math. Methods Appl. Sci., 34 (2011), 2274-2290.
[13] A. Khelifi and H. Zribi, Boundary voltage perturbations resulting from small surface changes of a conductivity inclusion, Appl. Anal., 93 (2014), 46-64.
[14] J. Lagha, F. Triki, and H. Zribi, Small perturbations of an interface for elastostatic problems, Math. Methods Appl. Sci., 40 (10) (2017), 3608-3636.
[15] J. Lagha and H. Zribi, An asymptotic expansion for perturbations in the displacement field due to the presence of thin interfaces, Appl. Anal., volume 1 (2017), 1-23.
[16] M. Lim, K. Louati, and H. Zribi, Reconstructing small perturbations of scatterers from electric or acoustic far-field measurements, Math. Methods Appl. Sci., 31 (2008), no. 11, 1315-1332.
[17] S. J. Ling, J. Sanny, and W. Moebs, University Physics - Volume 2, OpenStax, 2016.
[18] J. C. Maxwell, An Elementary Treatise on Electricity, Clarendon Press, Oxford, 1881.
[19] H.
Poincaré, Figures d'équilibre d'une masse fluide, Paris, 1902.
[20] G. Pólya, Estimating electrostatic capacity, Am. Math. Mon., 54, no. 4 (1947), 201-206.
[21] G. Pólya and G. Szegö, Isoperimetric Inequalities in Mathematical Physics, Annals of Mathematics Studies, Number 27, Princeton University Press, Princeton, NJ, 1951.
[22] G. Szegö, On the capacity of a condenser, Bull. Amer. Math. Soc., 51 (1945), 325-350.
[23] H. Zribi, Asymptotic expansions for currents caused by small interface changes of an electromagnetic inclusion, Appl. Anal., 92 (2013), 172-190.
[24] H. Zribi, Reconstructing small perturbations of an obstacle for acoustic waves from boundary measurements on the perturbed shape itself, Math. Methods Appl. Sci., 45 (2022), no. 1, 93-112.
Published as a conference paper at ICLR 2023
arXiv:2301.12056v1 [cs.LG] 28 Jan 2023

VARIATIONAL LATENT BRANCHING MODEL FOR OFF-POLICY EVALUATION

Qitong Gao*, Ge Gao†, Min Chi†, Miroslav Pajic*
*Duke University, USA. Contact: {qitong.gao, miroslav.pajic}@duke.edu. †North Carolina State University, USA. Code available at https://github.com/gaoqitong/vlbm.

ABSTRACT

Model-based methods have recently shown great potential for off-policy evaluation (OPE): offline trajectories induced by behavioral policies are fitted to transitions of Markov decision processes (MDPs), which are then used to roll out simulated trajectories and estimate the performance of policies. Model-based OPE methods face two key challenges. First, as offline trajectories are usually fixed, they tend to cover limited state and action space. Second, the performance of model-based methods can be sensitive to the initialization of their parameters. In this work, we propose the variational latent branching model (VLBM) to learn the transition function of MDPs by formulating the environmental dynamics as a compact latent space, from which the next states and rewards are then sampled. Specifically, VLBM leverages and extends the variational inference framework with recurrent state alignment (RSA), which is designed to capture as much of the information underlying the limited training data as possible, by smoothing out the information flow between the variational (encoding) and generative (decoding) parts of VLBM. Moreover, we also introduce the branching architecture to improve the model's robustness against randomly initialized model weights. The effectiveness of the VLBM is evaluated on the deep OPE (DOPE) benchmark, in which the training trajectories are designed to result in varied coverage of the state-action space. We show that the VLBM outperforms existing state-of-the-art OPE methods in general.

1 INTRODUCTION

Off-policy evaluation (OPE) allows for the evaluation of reinforcement learning (RL) policies without online interactions. It is applicable to many domains where on-policy data collection could be prevented due to efficiency and safety concerns, e.g., healthcare (Gao et al., 2022c;a; Tang & Wiens, 2021), recommendation systems (Mehrotra et al., 2018; Li et al., 2011), education (Mandel et al., 2014), social science (Segal et al., 2018), and optimal control (Silver et al., 2016; Vinyals et al., 2019; Gao et al., 2020a; 2019; 2020b).
Recently, as reported in the deep OPE (DOPE) benchmark (Fu et al., 2020b), model-based OPE methods, leveraging feed-forward (Fu et al., 2020b) and auto-regressive (AR) (Zhang et al., 2020a) architectures, have shown promising results toward estimating the return of target policies by fitting transition functions of MDPs. However, model-based OPE methods remain challenged, as they can only be trained using offline trajectory data, which often offers limited coverage of the state and action space. Thus, they may perform sub-optimally on tasks where parts of the dynamics are not fully explored (Fu et al., 2020b). Moreover, different initializations of the model weights could lead to varied evaluation performance (Hanin & Rolnick, 2018; Rossi et al., 2019), reducing the robustness of downstream OPE estimations. Some approaches in the RL policy optimization literature use latent models trained to capture a compact space from which the dynamics underlying MDPs are extrapolated; this allows learning expressive representations over the state-action space. However, such approaches usually require online data collection, as the focus is on quickly navigating to the high-reward regions (Rybkin et al., 2021), as well as on improving coverage of the explored state and action space (Zhang et al., 2019; Hafner et al., 2019; 2020a) or sample efficiency (Lee et al., 2020).

In this work, we propose the variational latent branching model (VLBM), aiming to learn a compact and disentangled latent representation space from offline trajectories, which can better capture the dynamics underlying environments. VLBM enriches the architectures and optimization objectives of existing latent modeling frameworks, allowing them to learn from a fixed set of offline trajectories. Specifically, VLBM considers learning variational (encoding) and generative (decoding) distributions, both represented by long short-term memories (LSTMs) with reparameterization (Kingma & Welling, 2013), to encode the state-action pairs and enforce the transitions over the latent space, respectively. To train such models, we optimize over the evidence lower bound (ELBO) jointly with a recurrent state alignment (RSA) term defined over the LSTM states; this ensures that the information encoded into the latent space can be effectively teased out by the decoder. Then, we introduce the branching architecture that allows multiple decoders to jointly infer from the latent space and reach a consensus, from which the next state and reward are generated. This is designed to mitigate the side effects of model-based methods where different weight initializations could lead to varied performance (Fu et al., 2020b; Hanin & Rolnick, 2018; Rossi et al., 2019).

We focus on using the VLBM to facilitate OPE since it allows us to better distinguish the improvements made upon learning the dynamics underlying the MDP used for estimating policy returns, as opposed to RL training, where performance can be affected by multiple factors, e.g., techniques used for exploration and policy optimization.
Moreover, model-based OPE methods are helpful for evaluating the safety and efficacy of RL-based controllers before deployment in the real world (Gao et al., 2022b), e.g., how a surgical robot would react to states that are critical to a successful procedure.

The key contributions of this paper are summarized as follows: (i) to the best of our knowledge, the VLBM is the first method that leverages variational inference for OPE. It can be trained using offline trajectories and capture environment dynamics over a latent space, as well as estimate returns of target (evaluation) policies accurately. (ii) The design of the RSA loss term and branching architecture can effectively smooth the information flow in the latent space shared by the encoder and decoder, increasing the expressiveness and robustness of the model. This is empirically shown in experiments by comparing with ablation baselines. (iii) Our method generally outperforms existing model-based and model-free OPE methods for evaluating policies over various D4RL environments (Fu et al., 2020a). Specifically, we follow guidelines provided by the DOPE benchmark (Fu et al., 2020b), which contains challenging OPE tasks where the training trajectories include varying levels of coverage of the state-action space, and target policies are designed toward resulting in state-action distributions different from the ones induced by behavioral policies.

2 VARIATIONAL LATENT BRANCHING MODEL

In this section, we first introduce the objective of OPE and the variational latent model (VLM) we consider. Then, we propose the recurrent state alignment (RSA) term as well as the branching architecture that constitute the variational latent branching model (VLBM).

2.1 OPE OBJECTIVE

We first introduce the MDP used to characterize the environment. Specifically, an MDP can be defined as a tuple $\mathcal{M}=(\mathcal{S},\mathcal{A},\mathcal{P},\mathcal{R},s_0,\gamma)$, where $\mathcal{S}$ is the set of states, $\mathcal{A}$ the set of actions, $\mathcal{P}:\mathcal{S}\times\mathcal{A}\to\mathcal{S}$ is the transition distribution, usually captured by probabilities $p(s_t|s_{t-1},a_{t-1})$, $\mathcal{R}:\mathcal{S}\times\mathcal{A}\to\mathbb{R}$ is the reward function, $s_0$ is the initial state sampled from the initial state distribution $p(s_0)$, and $\gamma\in[0,1)$ is the discounting factor. Finally, the agent interacts with the MDP following some policy $\pi(a|s)$, which defines the probabilities of taking action $a$ at state $s$. Then, the goal of OPE can be formulated as follows. Given trajectories collected by a behavioral policy $\beta$, $\rho^\beta=\{[(s_0,a_0,r_0,s_1),\dots,(s_{T-1},a_{T-1},r_{T-1},s_T)]^{(0)},[(s_0,a_0,r_0,s_1),\dots]^{(1)},\dots \mid a_t\sim\beta(a_t|s_t)\}$ (we slightly abuse the notation $\rho^\beta$ to represent either the trajectories or the state-action visitation distribution under the behavioral policy, depending on the context), estimate the expected total return over the unknown state-action visitation distribution $\rho^\pi$ of the target (evaluation) policy $\pi$, i.e., for $T$ being the horizon,
\[
\mathbb{E}_{(s,a)\sim\rho^\pi,\,r\sim\mathcal{R}}\Big[\textstyle\sum_{t=0}^{T}\gamma^t\mathcal{R}(s_t,a_t)\Big].
\tag{1}
\]

2.2 VARIATIONAL LATENT MODEL

We consider the VLM consisting of a prior $p(z)$ over the latent variables $z\in\mathcal{Z}\subset\mathbb{R}^l$, with $\mathcal{Z}$ representing the latent space and $l$ the dimension, along with a variational encoder $q_\psi(z_t|z_{t-1},a_{t-1},s_t)$ and a generative decoder $p_\phi(z_t,s_t,r_{t-1}|z_{t-1},a_{t-1})$, parameterized by $\psi$ and $\phi$ respectively. Basics of variational inference are introduced in Appendix F.
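To make the estimand in (1) concrete before the model is specified: once a dynamics model has been learned, the expectation is typically approximated by rolling out the target policy inside the learned model and averaging the discounted returns. The snippet below is a minimal illustrative sketch, not the released implementation; the `model.reset`/`model.step` and `policy.act` interfaces are assumptions made for the example.

```python
import numpy as np

def estimate_return(model, policy, num_rollouts=50, horizon=1000, gamma=0.995):
    """Monte-Carlo estimate of Eq. (1), treating the learned model as a synthetic environment."""
    returns = []
    for _ in range(num_rollouts):
        state = model.reset()                           # sample an initial state s_0
        total, discount = 0.0, 1.0
        for _ in range(horizon):
            action = policy.act(state)                  # a_t ~ pi(a_t | s_t)
            state, reward = model.step(state, action)   # (s_{t+1}, r_t) from the learned model
            total += discount * reward
            discount *= gamma
        returns.append(total)
    return float(np.mean(returns))
```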
Latent Prior $p(z_0)$. The prior specifies the distribution from which the latent variable of the initial stage, $z_0$, is sampled. We configure $p(z_0)$ to follow a Gaussian with zero mean and identity covariance matrix, which is a common choice under the variational inference framework (Kingma & Welling, 2013; Lee et al., 2020).

Figure 1: Architecture of the variational latent model (VLM) we consider (inference and generative processes).

Variational Encoder for Inference $q_\psi(z_t|z_{t-1},a_{t-1},s_t)$. The encoder is used to approximate the intractable posterior,
\[
p(z_t|z_{t-1},a_{t-1},s_t)=\frac{p(z_{t-1},a_{t-1},z_t,s_t)}{\int_{z_t\in\mathcal{Z}}p(z_{t-1},a_{t-1},z_t,s_t)\,dz_t},
\]
where the denominator requires integrating over the unknown latent space. Specifically, the encoder can be decomposed into two parts, given that
\[
q_\psi(z_{0:T}|s_{0:T},a_{0:T-1})=q_\psi(z_0|s_0)\prod_{t=1}^{T}q_\psi(z_t|z_{t-1},a_{t-1},s_t);
\tag{2}
\]
here, $q_\psi(z_0|s_0)$ encodes the initial state $s_0$ into the corresponding latent variable $z_0$, and $q_\psi(z_t|z_{t-1},a_{t-1},s_t)$ enforces the transition from $z_{t-1}$ to $z_t$ conditioned on $a_{t-1}$ and $s_t$. Both distributions are diagonal Gaussians (we assume that different dimensions of the states are not correlated with each other; otherwise, the states can be projected onto an orthogonal basis such that the non-diagonal elements of the covariance matrix are zero), with means and diagonals of covariance matrices determined by a multi-layered perceptron (MLP) (Bishop, 2006) and a long short-term memory (LSTM) (Hochreiter & Schmidhuber, 1997), respectively. The weights of both neural networks are referred to as $\psi$ in general. Consequently, the inference process for $z_t$ can be summarized as
\[
z_0^\psi\sim q_\psi(z_0|s_0),\qquad
h_t^\psi=f_\psi(h_{t-1}^\psi,z_{t-1}^\psi,a_{t-1},s_t),\qquad
z_t^\psi\sim q_\psi(z_t|h_t^\psi),
\tag{3}
\]
where $f_\psi$ represents the LSTM layer and $h_t^\psi$ the LSTM recurrent (hidden) state. Note that we use $\psi$ in superscripts to distinguish the variables involved in this inference process from those of the generative process introduced below. Moreover, reparameterization can be used to sample $z_0^\psi$ and $z_t^\psi$, such that gradients of sampling can be back-propagated, as introduced in (Kingma & Welling, 2013). An overview of the inference and generative processes is illustrated in Fig. 1.

Generative Decoder for Sampling $p_\phi(z_t,s_t,r_{t-1}|z_{t-1},a_{t-1})$. The decoder is used to interact with the target policies and acts as a synthetic environment during policy evaluation, from which the expected returns can be estimated as the mean return of simulated trajectories. The decoder can be represented as the product of three diagonal Gaussian distributions, given that
\[
p_\phi(z_{1:T},s_{0:T},r_{0:T-1}|z_0,\pi)=\prod_{t=0}^{T}p_\phi(s_t|z_t)\prod_{t=1}^{T}p_\phi(z_t|z_{t-1},a_{t-1})\,p_\phi(r_{t-1}|z_t),
\tag{4}
\]
with $a_t\sim\pi(a_t|s_t)$ at each time step. Specifically, $p_\phi(z_t|z_{t-1},a_{t-1})$ has its mean and covariance determined by an LSTM, enforcing the transition from $z_{t-1}$ to $z_t$ in the latent space given action $a_{t-1}$. In what follows, $p_\phi(s_t|z_t)$ and $p_\phi(r_{t-1}|z_t)$ generate the current state $s_t$ and reward $r_{t-1}$ given $z_t$, with means and covariances determined by MLPs. As a result, the generative process starts with sampling the initial latent variable from the latent prior, i.e., $z_0^\phi\sim p(z_0)$. Then, the initial state $s_0^\phi\sim p_\phi(s_0|z_0^\phi)$ and action $a_0\sim\pi(a_0|s_0^\phi)$ are obtained from $p_\phi$ and the target policy $\pi$, respectively; the rest of the generative process can be summarized as
\[
h_t^\phi=f_\phi(h_{t-1}^\phi,z_{t-1}^\phi,a_{t-1}),\quad
\tilde h_t^\phi=g_\phi(h_t^\phi),\quad
z_t^\phi\sim p_\phi(\tilde h_t^\phi),\quad
s_t^\phi\sim p_\phi(s_t|z_t^\phi),\quad
r_{t-1}^\phi\sim p_\phi(r_{t-1}|z_t^\phi),\quad
a_t\sim\pi(a_t|s_t^\phi),
\tag{5}
\]
where $f_\phi$ is the LSTM layer producing the recurrent state $h_t^\phi$. Then, an MLP $g_\phi$ is used to generate the mapping between $h_t^\phi$ and $\tilde h_t^\phi$ that will be used for the recurrent state alignment (RSA) introduced below, to augment the information flow between the inference and generative processes.
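The following PyTorch-style sketch illustrates one encoder step (3) and one decoder step (5) with reparameterized sampling. It is a hypothetical, simplified rendering for illustration only: the layer sizes, the use of `LSTMCell` with linear Gaussian heads, and the omission of the reward head are assumptions and are not taken from the authors' released code.

```python
import torch
import torch.nn as nn

class VLMStep(nn.Module):
    """One inference step (Eq. 3) and one generative step (Eq. 5); reward head omitted for brevity."""
    def __init__(self, s_dim, a_dim, z_dim=16, h_dim=64):
        super().__init__()
        # Encoder: LSTM over (z_{t-1}, a_{t-1}, s_t), then Gaussian heads for q_psi(z_t | h_t^psi).
        self.enc_lstm = nn.LSTMCell(z_dim + a_dim + s_dim, h_dim)
        self.enc_mu, self.enc_logvar = nn.Linear(h_dim, z_dim), nn.Linear(h_dim, z_dim)
        # Decoder: LSTM over (z_{t-1}, a_{t-1}), mapping g_phi, then Gaussian heads for z_t and s_t.
        self.dec_lstm = nn.LSTMCell(z_dim + a_dim, h_dim)
        self.g_phi = nn.Linear(h_dim, h_dim)   # produces \tilde h_t^phi, later used by the RSA term
        self.dec_z_mu, self.dec_z_logvar = nn.Linear(h_dim, z_dim), nn.Linear(h_dim, z_dim)
        self.dec_s_mu, self.dec_s_logvar = nn.Linear(z_dim, s_dim), nn.Linear(z_dim, s_dim)

    @staticmethod
    def reparameterize(mu, logvar):
        # z = mu + sigma * eps, so gradients flow through the sampling step
        return mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)

    def encode_step(self, z_prev, a_prev, s_t, hc=None):
        h, c = self.enc_lstm(torch.cat([z_prev, a_prev, s_t], dim=-1), hc)
        z_t = self.reparameterize(self.enc_mu(h), self.enc_logvar(h))
        return z_t, (h, c)

    def decode_step(self, z_prev, a_prev, hc=None):
        h, c = self.dec_lstm(torch.cat([z_prev, a_prev], dim=-1), hc)
        h_tilde = self.g_phi(h)
        z_t = self.reparameterize(self.dec_z_mu(h_tilde), self.dec_z_logvar(h_tilde))
        s_t = self.reparameterize(self.dec_s_mu(z_t), self.dec_s_logvar(z_t))
        return z_t, s_t, h_tilde, (h, c)
```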
Figure 2: (Left) Recurrent state alignment (RSA) applied over the recurrent hidden states, with the inference and generative processes illustrated separately. (Right) Single-step forward pass of the variational latent branching model (VLBM), the training objectives for each branch, and the final predictions.

Furthermore, to train the elements in the encoder (3) and decoder (5), one can maximize the evidence lower bound (ELBO), a lower bound of the joint log-likelihood $p(s_{0:T},r_{0:T-1})$, following
\[
\begin{aligned}
\mathcal{L}_{ELBO}(\psi,\phi)=\mathbb{E}_{q_\psi}\Big[&\sum_{t=0}^{T}\log p_\phi(s_t|z_t)+\sum_{t=1}^{T}\log p_\phi(r_{t-1}|z_t)-KL\big(q_\psi(z_0|s_0)\,\|\,p(z_0)\big)\\
&-\sum_{t=1}^{T}KL\big(q_\psi(z_t|z_{t-1},a_{t-1},s_t)\,\|\,p_\phi(z_t|z_{t-1},a_{t-1})\big)\Big];
\end{aligned}
\tag{6}
\]
here, the first two terms represent the log-likelihood of reconstructing the states and rewards, and the last two terms regularize the approximated posterior. The proof can be found in Appendix E.

2.3 RECURRENT STATE ALIGNMENT

The latent model discussed above is somewhat reminiscent of the ones used in model-based RL policy training methods, e.g., the recurrent state space model (RSSM) used in PlaNet (Hafner et al., 2019) and Dreamer (Hafner et al., 2020a;b), as well as similar ones in Lee et al. (2020); Lu et al. (2022). Such methods rely on a growing experience buffer for training, which is collected online by the target policy that is being concurrently updated (with exploration noise added); however, OPE aims to extrapolate returns from a fixed set of offline trajectories, which may result in limited coverage of the state and action space. Consequently, directly applying VLM for OPE can lead to subpar performance empirically; see results in Sec. 3. Moreover, the encoder above plays a key role in capturing the temporal transitions between latent variables, i.e., $q_\psi(z_t|z_{t-1},a_{t-1},s_t)$ from (2). However, it is absent from the generative process, as the decoder leverages a separate network to determine the latent transitions, i.e., $p_\phi(z_t|z_{t-1},a_{t-1})$. Moreover, from the ELBO (6) above it can be seen that only the KL-divergence terms are used to regularize these two parts, which may not be sufficient for OPE, as limited offline trajectories are provided. As a result, we introduce the RSA term as part of the training objective, to further regularize $q_\psi(z_t|z_{t-1},a_{t-1},s_t)$ and $p_\phi(z_t|z_{t-1},a_{t-1})$. A graphical illustration of RSA can be found in Fig. 2 (rewards and actions are omitted there for conciseness of the presentation).

Specifically, RSA is defined as the mean pairwise squared error between $h_t^\psi$ from the encoder (3) and $\tilde h_t^\phi$ from the decoder (5), i.e.,
\[
\mathcal{L}_{RSA}(\tilde h_t^\phi,h_t^\psi;\psi,\phi)=\frac{1}{N}\sum_{i=1}^{N}\sum_{t=0}^{T}\frac{M(M-1)}{2}\Bigg[\sum_{j=1}^{M-1}\sum_{k=j+1}^{M}\Big((\tilde h_t^\phi[j]-\tilde h_t^\phi[k])-(h_t^\psi[j]-h_t^\psi[k])\Big)^2\Bigg];
\tag{7}
\]
here, we assume that both LSTM recurrent states have the same dimension, $\tilde h_t^\phi,h_t^\psi\in\mathbb{R}^M$, with $h_t^{(\cdot)}[j]$ referring to the $j$-th element of the recurrent state, and $N$ the number of training trajectories. We choose the pairwise squared loss over the classic mean squared error (MSE) because MSE could be too strong a regularizer for $h_t^\psi$ and $\tilde h_t^\phi$, which support the inference and generative processes respectively and are not supposed to be exactly the same. In contrast, the pairwise loss (7) can
In contrast, the pairwise loss (7) can +3Rewards and actions are omitted for conciseness of the presentation. +4 + +max Likelihood +S +Φ1 +AMLP +Z. +St +Φ2 +LSTM +MLP +D +2 +S +山 +S +at- +ΦB +LSTM +ΦB +u +七 +ΦB +Lr +D +min KL DivergenceInference +So +S1 +S2 +山 +ha +必 +Recurrent State +Alignment (RSA) +& +.Φ +2 +Z. +2 +d +S1 +Generative +S.Published as a conference paper at ICLR 2023 +promote structural similarity between the LSTM recurrent states of the encoder and decoder, without +strictly enforcing them to become the same. Note that this design choice has been justified in Sec. 3 +through an ablation study by comparing against models trained with MSE. In general, the pairwise +loss has also been adopted in many domains for similar purposes, e.g., object detection (Gould +et al., 2009; Rocco et al., 2018), ranking systems (Doughty et al., 2018; Saquil et al., 2021) and +contrastive learning (Wang et al., 2021; Chen et al., 2020). Similarly, we apply the pairwise loss over +hψ +t and ˜hφ +t , instead of directly over hψ +t and hφ +t , as the mapping gφ (from equation 5) could serve as a +regularization layer to ensure optimality over LRSA without changing hψ +t , hφ +t significantly. +As a result, the objective for training the VLM, following architectures specified in (3) and (5), can +be formulated as +max +ψ,φ LV LM(ψ, φ) = max +ψ,φ +� +LELBO(ψ, φ) − C · LRSA(˜hφ +t , hψ +t ; ψ, φ) +� +, +(8) +with C > 0 and C ∈ R being the constant balancing the scale of the ELBO and RSA terms. +2.4 +BRANCHING FOR GENERATIVE DECODER +The performance of model-based methods can vary upon different design factors (Fu et al., 2020b; +Hanin & Rolnick, 2018). Specifically, Rossi et al. (2019) has found that the convergence speed and +optimality of variational models are sensitive to the choice of weight initialization techniques. More- +over, under the typical variational inference setup followed by the VLM above, the latent transitions +reconstructed by the decoder, pφ(zt|zt−1, at−1), are only trained through regularization losses in (6) +and (7), but are fully responsible for rolling out trajectories during evaluation. Consequently, in this +sub-section we introduce the branching architecture for decoder, with the goal of minimizing the +impact brought by random weight initialization of the networks, and allowing the decoder to best +reconstruct the latent transitions pφ(zt|zt−1, at−1) as well as st’s and rt−1’s correctly. Specifically, +the branching architecture leverages an ensemble of B ∈ Z+ decoders to tease out information from +the latent space formulated by the encoder, with final predictions sampled from a mixture of the +Gaussian output distributions from (5). Note that the classic setup of ensembles is not considered, +i.e., train and average over B VLMs end-to-end; because in this case B different latent space exist, +each of which is still associated with a single decoder, leaving the challenges above unresolved. This +design choice is justified by ablations studies in Sec. 3, by comparing VLBM against a (classic) +ensemble of VLMs. +Branching Architecture. +Consider the generative process involving B branches of the decoders +parameterized by {φ1, . . . , φB}. The forward architecture over a single step is illustrated in Fig. 2.4 +Specifically, the procedure of sampling zφb +t +and sφb +t +for each b ∈ [1, B] follows from (5). 
Recall that +by definition pφb(st|zφb +t ) follows multivariate Gaussian with mean and diagonal of covariance matrix +determined by the corresponding MLPs, i.e., µ(sφb +t ) = φMLP +b,µ +(zφb +t ) and Σdiag(sφb +t ) = φMLP +b,Σ +(zφb +t ). +In what follows, the final outcome sφ +t can be sampled following diagonal Gaussian with mean and +variance determined by weighted averaging across all branches using weights wb’s, i.e., +sφ +t ∼ pφ(st|zφ1 +t , . . . , zφB +t +) = N +� +µ = +� +b +wb · µ(sφb +t ), Σdiag = +� +b +w2 +b · Σdiag(sφb +t ) +� +. +(9) +The objective below can be used to jointly update, wb’s, ψ and φb’s, i.e., +max +ψ,φ,w LV LBM(ψ, φ1, . . . , φB, w1, . . . , wB) += max +ψ,φ,w +� +T +� +t=0 +log pφ(sφ +t |zφ1 +t , . . . , zφB +t +) − C1 · +� +b +LRSA(˜hφb +t , hψ +t ; ψ, φb) + C2 +� +b +LELBO(ψ, φb) +� +, +s.t. +w1, . . . , wB > 0 , +� +b +wb = 1 and constants C1, C2 > 0. +(10) +Though the first term above already propagates through all wb’s and φb’s, the third term and constraints +over wb’s regularize φb in each individual branch such that they are all trained toward maximizing +4For simplicity, the parts generating rewards are omitted without lost of generality. +5 + +Published as a conference paper at ICLR 2023 +the likelihood pφb(sφb +t |zφb +t ). Pseudo-code for training and evaluating the VLBM can be found in +Appendix C. Further, in practice, one can define wb = +v2 +b +ϵ+� +b v2 +b , with vb ∈ R the learnable variables +and 0 < ϵ ≪ 1, ϵ ∈ R, the constant ensuring denominator to be greater than zero, to convert (10) +into unconstrained optimization and solve it using gradient descent. Lastly, note that complementary +latent modeling methods, e.g., latent overshooting from Hafner et al. (2019), could be adopted in (10). +However, we keep the objective straightforward, so that the source of performance improvements can +be isolated. +3 +EXPERIMENTS +Figure 3: Mean rank correlation, regret@1 and MAE over +all the 32 Gym-Mujoco and Adroit tasks, showing VLBM +achieves state-of-the-art performance overall. +To evaluate the VLBM, we follow +the guidelines from the deep OPE +(DOPE) benchmark (Fu et al., 2020b). +Specifically, we follow the D4RL +branch in DOPE and use the Gym- +Mujoco and Adroit suites as the test +base (Fu et al., 2020a). Such environ- +ments have long horizons and high- +dimensional state and action space, +which are usually challenging for +model-based methods. The provided +offline trajectories for training are +collected using behavioral policies +at varied scale, including limited ex- +ploration, human teleoperation etc., +which can result in different levels of +coverage over the state-action space. Also, the target (evaluation) policies are generated using online +RL training, aiming to reduce the similarity between behavioral and target policies; it introduces +another challenge that during evaluation the agent may visit states unseen from training trajectories. +Environmental and Training Setup. +A total of 8 environments are provided by Gym-Mujoco and +Adroit suites (Fu et al., 2020b;a). Moreover, each environment is provided with 5 (for Gym-Mujoco) +or 3 (for Adroit) training datasets collected using different behavioral policies, resulting in a total of +32 sets of env-dataset tasks5 – a full list can be found in Appendix A. DOPE also provides 11 +target policies for each environment, whose performance are to be evaluated by the OPE methods. +They in general result in varied scales of returns, as shown in the x-axes of Fig. 7. 
3 EXPERIMENTS

Figure 3: Mean rank correlation, regret@1, and MAE over all the 32 Gym-Mujoco and Adroit tasks, showing that VLBM achieves state-of-the-art performance overall.

To evaluate the VLBM, we follow the guidelines from the deep OPE (DOPE) benchmark (Fu et al., 2020b). Specifically, we follow the D4RL branch in DOPE and use the Gym-Mujoco and Adroit suites as the test base (Fu et al., 2020a). Such environments have long horizons and high-dimensional state and action spaces, which are usually challenging for model-based methods. The provided offline trajectories for training are collected using behavioral policies at varied scales, including limited exploration, human teleoperation, etc., which can result in different levels of coverage over the state-action space. Also, the target (evaluation) policies are generated using online RL training, aiming to reduce the similarity between behavioral and target policies; this introduces another challenge, namely that during evaluation the agent may visit states unseen in the training trajectories.

Environmental and Training Setup. A total of 8 environments are provided by the Gym-Mujoco and Adroit suites (Fu et al., 2020b;a). Moreover, each environment is provided with 5 (for Gym-Mujoco) or 3 (for Adroit) training datasets collected using different behavioral policies, resulting in a total of 32 env-dataset tasks (from now on the dataset names are abbreviated by their initials, e.g., Ant-M-R refers to Ant-Medium-Replay); a full list can be found in Appendix A. DOPE also provides 11 target policies for each environment, whose performance is to be evaluated by the OPE methods. They in general result in varied scales of returns, as shown on the x-axes of Fig. 7. Moreover, we consider the decoder to have $B=10$ branches, i.e., $\{p_{\phi_1},\dots,p_{\phi_{10}}\}$. The dimension of the latent space is set to 16, i.e., $z\in\mathcal{Z}\subset\mathbb{R}^{16}$. Other implementation details can be found in Appendix A.

Baselines and Evaluation Metrics. In addition to the five baselines reported from DOPE, i.e., importance sampling (IS) (Precup, 2000), doubly robust (DR) (Thomas & Brunskill, 2016), variational power method (VPM) (Wen et al., 2020), distribution correction estimation (DICE) (Yang et al., 2020), and fitted Q-evaluation (FQE) (Le et al., 2019), the effectiveness of VLBM is also compared against the state-of-the-art model-based OPE method leveraging the auto-regressive (AR) architecture (Zhang et al., 2020a). Specifically, for each task we train an ensemble of 10 AR models, for fair comparison against VLBM, which leverages the branching architecture; see Appendix A for details of the AR ensemble setup. Following the DOPE benchmark (Fu et al., 2020b), our evaluation metrics include rank correlation, regret@1, and mean absolute error (MAE). VLBM and all baselines are trained using 3 different random seeds over each task, leading to the results reported below.

Ablation. Four ablation baselines are also considered, i.e., VLM, VLM+RSA, VLM+RSA(MSE), and VLM+RSA Ensemble. Specifically, VLM refers to the model introduced in Sec. 2.2, trained toward maximizing only the ELBO, i.e., (6). Note that, arguably, VLM could be seen as a generalization of directly applying the latent models proposed in the existing RL policy optimization literature (Lee et al., 2020; Hafner et al., 2019; 2020a;b; Lu et al., 2022); details can be found in Sec. 4 below. The VLM+RSA ablation baseline follows the same model architecture as VLM, but is trained to optimize over both the ELBO and the recurrent state alignment (RSA) as introduced in (8); i.e., branching is not used, in contrast to VLBM. The design of these two baselines can help analyze the effectiveness of the RSA loss term and branching architecture introduced in Sec. 2.3 and 2.4. Moreover, VLM+RSA(MSE) uses mean squared error to replace the pairwise loss introduced in (7), and the VLM+RSA Ensemble applies classic ensembling by averaging over B VLM+RSA models end-to-end, instead of branching from the decoder as in VLBM. These two ablation baselines can help justify the use of the pairwise loss for RSA, and the benefit of the branching architecture over classic ensembles.
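Before turning to the results, the three evaluation metrics listed above can be summarized with the short sketch below; it mirrors their common definitions in spirit, while the exact normalizations used by the DOPE benchmark may differ.

```python
import numpy as np
from scipy.stats import spearmanr

def ope_metrics(estimated_returns, true_returns):
    """Rank correlation, regret@1, and MAE between estimated and true policy returns."""
    est, true = np.asarray(estimated_returns), np.asarray(true_returns)
    rank_corr = spearmanr(est, true).correlation          # Spearman rank correlation
    best_est = int(np.argmax(est))                        # policy ranked best by the OPE method
    regret_at_1 = float(true.max() - true[best_est])      # value gap w.r.t. the truly best policy
    mae = float(np.abs(est - true).mean())                # mean absolute error of the estimates
    return rank_corr, regret_at_1, mae
```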
Note that results for IS, VPM, DICE, DR, and FQE are taken directly from the DOPE benchmark (Fu et al., 2020b), since the same experimental setup is used. Fig. 4 and 5 visualize the mean performance for each Gym-Mujoco and Adroit environment, respectively, over all the associated datasets. It can also be observed that the model-based and FQE baselines generally perform better than the other baselines, which is consistent with the findings reported in DOPE.

The fact that VLM+RSA outperforms the VLM ablation baseline, as shown in Fig. 4, illustrates the need for the RSA loss term to smooth the flow of information between the encoder and decoder in the latent space. Moreover, one can observe that VLM+RSA(MSE) sometimes performs worse than VLM, and generally performs significantly worse than VLM+RSA. Specifically, it has been found that, compared to VLM and VLM+RSA respectively, VLM+RSA(MSE) significantly worsens at least two of the metrics in 7 and 12 (out of 20) Gym-Mujoco tasks; detailed per-task performance can be found in Tables 1-6 at the end of the Appendices. Such a finding supports the design choice of using a pairwise loss for RSA instead of MSE, as MSE may be too strong a regularizer for the LSTM recurrent states of the encoder and decoder, while the pairwise loss only enforces structural similarity. Moreover, VLBM improves rank correlations and regrets significantly compared to VLM+RSA, illustrating the importance of the branching architecture. In the paragraph below, we show empirically the benefits brought by branching over classic ensembles.

Figure 8: t-SNE visualization over the latent space, capturing encoded state-action visitations induced from all target policies. Each point is colored by the corresponding policy from which it is generated. Policies in the legend are sorted in order of increasing performance.

Branching versus Classic Ensembles. Fig. 4 shows that the VLM+RSA Ensemble does not improve performance over VLM+RSA in general, and even leads to worse overall rank correlations and regrets in the Walker2d and Hopper environments. This supports the rationale provided in Sec. 2.4 that each decoder in a classic ensemble still samples exclusively from its own latent space, so averaging over the output distributions may not reduce the disturbance introduced by modeling artifacts under the variational inference framework, e.g., random weight initializations (Hanin & Rolnick, 2018; Rossi et al., 2019). In contrast, the VLBM leverages the branching architecture, allowing all branches to sample from the same latent space formulated by the encoder. Empirically, we find that the branching weights w_b in (9) allow VLBM to kill branches that are not helpful toward reconstructing the trajectories accurately, possibly overcoming bad initializations. Over all 32 tasks we consider, most VLBMs keep only 1-3 branches (out of 10), i.e., w_b < 10^-5 for all other branches. The distribution of all w_b's, from VLBMs trained on the 32 tasks, is shown in Fig. 6; one can observe that most of the w_b's are close to zero, while the others generally fall within (0, 0.25] or [0.75, 1).
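As an illustration of this pruning effect, the learned weight variables v_b can be mapped to w_b and thresholded to identify the surviving branches. The snippet below is a hypothetical inspection utility of our own; the 10^-5 cut-off follows the observation above.

```python
import torch

def active_branches(v, eps=1e-8, threshold=1e-5):
    """Indices of branches whose weight w_b = v_b^2 / (eps + sum_b v_b^2) exceeds
    `threshold`; the remaining branches contribute negligibly to (9) and are
    effectively 'killed' during training."""
    w = v ** 2 / (eps + (v ** 2).sum())
    return torch.where(w >= threshold)[0], w

# Hypothetical learned v_b's for B = 10 branches:
v = torch.tensor([2.1, 3e-3, -1.7, 5e-4, 1e-3, 2e-3, -4e-3, 2e-4, 1e-3, 8e-4])
keep, w = active_branches(v)
print(keep.tolist())  # typically only 1-3 branches remain above the threshold
```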
Figure 7: Correlation between the estimated (y-axis) and true (x-axis) returns, across different model-based OPE methods and environments.

AR ensembles also lead to compelling rank correlations and regrets, but their margins in MAE over the other baselines are much smaller in general; see Fig. 3. From Fig. 7, one can observe that the AR ensemble tends to significantly under-estimate most of the high-performing policies. Scatter plots for the other tasks, which show the same trend, can be found in Appendix A. The reason could be that the AR architecture and training objectives are designed to directly learn the transitions of the MDP; thus, it may produce biased predictions when the target policies visit states that are not substantially represented in the training data, since those data are obtained using sub-optimal behavioral policies. In contrast, the VLBM can leverage RSA and branching against such situations, thus outperforming AR ensembles in most of the OPE tasks in terms of all the metrics we consider. Interestingly, Fig. 7 also shows that latent models can sometimes over-estimate the returns. For example, in Hopper-M-E and Walker2d-M-E, VLM tends to over-estimate most policies. The VLBM performs consistently well in Hopper-M-E, and is only mildly affected by this effect in Walker2d-M-E, over fewer policies and with smaller margins. It has been found that variational inference may fall short in approximating true distributions that are asymmetric, producing biased estimates (Yao et al., 2018). Our hypothesis is thus that the dynamics of certain environments may lead to asymmetry in the true posterior p(z_t | z_{t-1}, a_{t-1}, s_t), which could be hard to capture with the latent modeling framework we consider; a more comprehensive understanding of this behavior is left for future work. Nevertheless, the VLBM still significantly outperforms VLM overall, and achieves top-performing rank correlations and regrets; such results illustrate the VLBM's improved robustness, a result of its architectural design and its choice of training objectives.

t-SNE Visualization of the Latent Space. Fig. 8 shows a t-SNE visualization of the latent space, obtained by rolling out trajectories under each target policy and feeding the resulting state-action pairs into the encoder of the VLBM, which maps them into the latent space. The encoded state-action pairs induced by policies of similar performance are in general swirled and clustered together, illustrating that the VLBM learns expressive and disentangled representations of its inputs.

4 RELATED WORK

Latent Modeling in RL.
+Though variational inference has rarely been explored to facilitate model- +based OPE methods so far, there exist several latent models designed for RL policy optimization that +are related to our work, such as SLAC (Lee et al., 2020), SOLAR (Zhang et al., 2019), LatCo (Rybkin +et al., 2021), PlaNet (Hafner et al., 2019), Dreamer (Hafner et al., 2020a;b). Below we discuss +the connections and distinctions between VLBM and the latent models leveraged by them, with a +detailed overview of these methods provided in Appendix G. Specifically, SLAC and SOLAR learn +latent representations of the dynamics jointly with optimization of the target policies, using the latent +information to improve sample efficiency. Similarly, LatCo performs trajectory optimization over +the latent space to allow for temporarily bypassing dynamic constraints. As a result, latent models +used in such methods are not designed toward rolling out trajectories independently, as opposed to +the use of VLBM in this paper. PlaNet and Dreamer train the recurrent state space model (RSSM) +using a growing experience dataset collected by the target policy that is being concurrently updated +(with exploration noise added), which requires online data collection. In contrast, under the OPE +setup, VLBM is trained over a fixed set of offline trajectories collected over unknown behavioral +policies. Moreover, note that the VLM baseline is somewhat reminiscent of the RSSM and similar +ones as in Lee et al. (2020); Lu et al. (2022), however, experiments above show that directly using +VLM for OPE could lead to subpar performance. On the other hand, though MOPO (Yu et al., 2020), +LOMPO (Rafailov et al., 2021) and COMBO (Yu et al., 2021) can learn from offline data, they +focus on quantifying the uncertainty of model’s predictions toward next states and rewards, followed +by incorporating them into policy optimization objectives to penalize for visiting regions where +transitions are not fully captured; thus, such works are also orthogonal to the use case of OPE. +OPE. +Classic OPE methods adopt IS to estimate expectations over the unknown visitation distribu- +tion over the target policy, resulting in weighted IS, step-wise IS and weighted step-wise IS (Precup, +2000). IS can lead to estimations with low (or zero) bias, but with high variance (Kostrikov & +Nachum, 2020; Jiang & Li, 2016), which sparks a long line of research to address this challenge. +DR methods propose to reduce variance by coupling IS with a value function approximator (Jiang +& Li, 2016; Thomas & Brunskill, 2016; Farajtabar et al., 2018). However, the introduction of such +approximations may increase bias, so the method proposed in Tang et al. (2019) attempts to balance +the scale of bias and variance for DR. Unlike IS and DR methods that require the behavioral policies +to be fully known, DICE family of estimators (Zhang et al., 2020c;b; Yang et al., 2021; 2020; Nachum +et al., 2019; Dai et al., 2020) and VPM (Wen et al., 2020) can be behavioral-agnostic; they directly +capture marginalized IS weights as the ratio between the propensity of the target policy to visit +particular state-action pairs, relative to their likelihood of appearing in the logged data. There also +exist FQE methods which extrapolate policy returns from approximated Q-functions (Hao et al., +2021; Le et al., 2019; Kostrikov & Nachum, 2020). 
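To make the contrast with these classic estimators concrete, a weighted step-wise IS estimator can be sketched as follows. The function signature is ours, and the sketch assumes equal-length trajectories and a fully known behavioral policy.

```python
import numpy as np

def weighted_stepwise_is(trajectories, pi_target, pi_behavior, gamma=0.995):
    """Weighted step-wise importance sampling (cf. Precup, 2000).
    `trajectories` is a list of equal-length lists of (state, action, reward) tuples;
    pi_target(a, s) and pi_behavior(a, s) return action probabilities/densities."""
    T = len(trajectories[0])
    num = np.zeros(T)   # sum_i w_t^i * gamma^t * r_t^i
    den = np.zeros(T)   # sum_i w_t^i, used for self-normalization
    for tau in trajectories:
        w = 1.0
        for t, (s, a, r) in enumerate(tau):
            w *= pi_target(a, s) / pi_behavior(a, s)   # cumulative IS ratio up to step t
            num[t] += w * (gamma ** t) * r
            den[t] += w
    return float(np.sum(num / np.maximum(den, 1e-12)))
```

The self-normalization above trades a small amount of bias for reduced variance, but the variance still grows with the horizon, which motivates the DR, DICE and FQE families discussed above.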
Existing model-based OPE methods are designed +to directly fit MDP transitions using feed-forward (Fu et al., 2020b) or auto-regressive (Zhang et al., +2020a) models, and has shown promising results over model-free methods as reported in a recent +benchmark (Fu et al., 2020b). However, such model-based approaches could be sensitive to the +initialization of weights (Hanin & Rolnick, 2018; Rossi et al., 2019) and produce biased predictions, +due to the limited coverage over state and action space provided by offline trajectories (Fu et al., +2020b). Instead, VLBM mitigates such effects by capturing the dynamics over the latent space, such +that states and rewards are evolved from a compact feature space over time. Moreover, RSA and the +branching can lead to increased expressiveness and robustness, such that future states and rewards +are predicted accurately. There also exist OPE methods proposed toward specific applications (Chen +et al., 2022; Saito et al., 2021; Gao et al., 2023; 2022b). +5 +CONCLUSION AND FUTURE WORK +We have developed the VLBM which can accurately capture the dynamics underlying environments +from offline training data that provide limited coverage of the state and action space; this is achieved +by using the RSA term to smooth out the information flow from the encoders to decoders in the +latent space, as well as the branching architecture which improve VLBM’s robustness against random +initializations. We have followed evaluation guidelines provided by the DOPE benchmark, and +experimental results have shown that the VLBM generally outperforms the state-of-the-art model- +based OPE method using AR architectures, as well as other model-free methods. VLBM can also +facilitate off-policy optimizations, which can be explored in future works. Specifically, VLBM can +serve as a synthetic environment on which optimal controllers (e.g., linear–quadratic regulator) can +be deployed. On the other hand, similar to Dreamer and SLAC, policies can be updated jointly with +training of VLBM, but without the need of online interactions with the environment during training. +9 + +Published as a conference paper at ICLR 2023 +ACKNOWLEDGMENTS +This work is sponsored in part by the AFOSR under award number FA9550-19-1-0169, as well as the +NSF CNS-1652544, CNS-1837499, DUE-1726550, IIS-1651909 and DUE-2013502 awards, as well +as the National AI Institute for Edge Computing Leveraging Next Generation Wireless Networks, +Grant CNS-2112562. +REFERENCES +Christopher M Bishop. Pattern recognition and machine learning. Springer, 2006. +Minmin Chen, Can Xu, Vince Gatto, Devanshu Jain, Aviral Kumar, and Ed Chi. Off-policy actor- +critic for recommender systems. In Proceedings of the 16th ACM Conference on Recommender +Systems, pp. 338–349, 2022. +Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for +contrastive learning of visual representations. In International Conference on Machine Learning, +pp. 1597–1607. PMLR, 2020. +Bo Dai, Ofir Nachum, Yinlam Chow, Lihong Li, Csaba Szepesvári, and Dale Schuurmans. Coindice: +Off-policy confidence interval estimation. Advances in Neural Information Processing Systems, +33:9398–9411, 2020. +Hazel Doughty, Dima Damen, and Walterio Mayol-Cuevas. Who’s better? who’s best? pairwise deep +ranking for skill determination. In Proceedings of the IEEE Conference on Computer Vision and +Pattern Recognition, pp. 6057–6066, 2018. +Mehrdad Farajtabar, Yinlam Chow, and Mohammad Ghavamzadeh. 
More robust doubly robust +off-policy evaluation. In International Conference on Machine Learning, pp. 1447–1456. PMLR, +2018. +Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, and Sergey Levine. D4rl: Datasets for deep +data-driven reinforcement learning. arXiv preprint arXiv:2004.07219, 2020a. +Justin Fu, Mohammad Norouzi, Ofir Nachum, George Tucker, Alexander Novikov, Mengjiao Yang, +Michael R Zhang, Yutian Chen, Aviral Kumar, Cosmin Paduraru, et al. Benchmarks for deep +off-policy evaluation. In International Conference on Learning Representations, 2020b. +Ge Gao, Qitong Gao, Xi Yang, Miroslav Pajic, and Min Chi. A reinforcement learning-informed pat- +tern mining framework for multivariate time series classification. In International Joint Conference +on Artificial Intelligence (IJCAI), 2022a. +Ge Gao, Song Ju, Markel Sanz Ausin, and Min Chi. Hope: Human-centric off-policy evaluation for +e-learning and healthcare. In International Conference on Autonomous Agents and Multiagent +Systems (AAMAS), 2023. +Qitong Gao, Davood Hajinezhad, Yan Zhang, Yiannis Kantaros, and Michael M Zavlanos. Reduced +variance deep reinforcement learning with temporal logic specifications. In Proceedings of the +10th ACM/IEEE International Conference on Cyber-Physical Systems, pp. 237–248, 2019. +Qitong Gao, Michael Naumann, Ilija Jovanov, Vuk Lesi, Karthik Kamaravelu, Warren M Grill, +and Miroslav Pajic. Model-based design of closed loop deep brain stimulation controller using +reinforcement learning. In 2020 ACM/IEEE 11th International Conference on Cyber-Physical +Systems (ICCPS), pp. 108–118. IEEE, 2020a. +Qitong Gao, Miroslav Pajic, and Michael M Zavlanos. Deep imitative reinforcement learning for +temporal logic robot motion planning with noisy semantic observations. In 2020 IEEE International +Conference on Robotics and Automation (ICRA), pp. 8490–8496. IEEE, 2020b. +Qitong Gao, Stephen L Schmidt, Karthik Kamaravelu, Dennis A Turner, Warren M Grill, and +Miroslav Pajic. Offline policy evaluation for learning-based deep brain stimulation controllers. In +2022 ACM/IEEE 13th International Conference on Cyber-Physical Systems (ICCPS), pp. 80–91. +IEEE, 2022b. +10 + +Published as a conference paper at ICLR 2023 +Qitong Gao, Dong Wang, Joshua D Amason, Siyang Yuan, Chenyang Tao, Ricardo Henao, Majda +Hadziahmetovic, Lawrence Carin, and Miroslav Pajic. Gradient importance learning for incomplete +observations. International Conference on Learning Representations, 2022c. +Stephen Gould, Tianshi Gao, and Daphne Koller. Region-based segmentation and object detection. +Advances in Neural Information Processing Systems, 22, 2009. +Danijar Hafner, Timothy Lillicrap, Ian Fischer, Ruben Villegas, David Ha, Honglak Lee, and James +Davidson. Learning latent dynamics for planning from pixels. In International conference on +machine learning, pp. 2555–2565. PMLR, 2019. +Danijar Hafner, Timothy Lillicrap, Jimmy Ba, and Mohammad Norouzi. Dream to control: Learning +behaviors by latent imagination. In International Conference on Learning Representations, 2020a. +Danijar Hafner, Timothy P Lillicrap, Mohammad Norouzi, and Jimmy Ba. Mastering atari with +discrete world models. In International Conference on Learning Representations, 2020b. +Boris Hanin and David Rolnick. How to start training: The effect of initialization and architecture. +Advances in Neural Information Processing Systems, 31, 2018. +Botao Hao, Xiang Ji, Yaqi Duan, Hao Lu, Csaba Szepesvari, and Mengdi Wang. 
Bootstrapping +fitted q-evaluation for off-policy inference. In International Conference on Machine Learning, pp. +4074–4084. PMLR, 2021. +Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural computation, 9(8): +1735–1780, 1997. +Nan Jiang and Lihong Li. Doubly robust off-policy value evaluation for reinforcement learning. In +International Conference on Machine Learning, pp. 652–661. PMLR, 2016. +Diederik P Kingma and Max Welling. +Auto-encoding variational bayes. +arXiv preprint +arXiv:1312.6114, 2013. +Ilya Kostrikov and Ofir Nachum. Statistical bootstrapping for uncertainty estimation in off-policy +evaluation. arXiv preprint arXiv:2007.13609, 2020. +Hoang Le, Cameron Voloshin, and Yisong Yue. Batch policy learning under constraints. In Interna- +tional Conference on Machine Learning, pp. 3703–3712. PMLR, 2019. +Alex Lee, Anusha Nagabandi, Pieter Abbeel, and Sergey Levine. Stochastic latent actor-critic: Deep +reinforcement learning with a latent variable model. Advances in Neural Information Processing +Systems, 33, 2020. +Lihong Li, Wei Chu, John Langford, and Xuanhui Wang. Unbiased offline evaluation of contextual- +bandit-based news article recommendation algorithms. In Proceedings of the fourth ACM Interna- +tional Conference on Web Search and Data Mining, pp. 297–306, 2011. +Cong Lu, Philip J Ball, Tim GJ Rudner, Jack Parker-Holder, Michael A Osborne, and Yee Whye Teh. +Challenges and opportunities in offline reinforcement learning from visual observations. arXiv +preprint arXiv:2206.04779, 2022. +Travis Mandel, Yun-En Liu, Sergey Levine, Emma Brunskill, and Zoran Popovic. Offline policy +evaluation across representations with applications to educational games. In AAMAS, volume 1077, +2014. +Rishabh Mehrotra, James McInerney, Hugues Bouchard, Mounia Lalmas, and Fernando Diaz. +Towards a fair marketplace: Counterfactual evaluation of the trade-off between relevance, fairness & +satisfaction in recommendation systems. In Proceedings of the 27th ACM International Conference +on Information and Knowledge Management, pp. 2243–2251, 2018. +Ofir Nachum, Yinlam Chow, Bo Dai, and Lihong Li. Dualdice: Behavior-agnostic estimation of +discounted stationary distribution corrections. Advances in Neural Information Processing Systems, +32:2318–2328, 2019. +11 + +Published as a conference paper at ICLR 2023 +Doina Precup. Eligibility traces for off-policy policy evaluation. Computer Science Department +Faculty Publication Series, pp. 80, 2000. +Rafael Rafailov, Tianhe Yu, Aravind Rajeswaran, and Chelsea Finn. Offline reinforcement learning +from images with latent space models. In Learning for Dynamics and Control, pp. 1154–1168. +PMLR, 2021. +Ignacio Rocco, Relja Arandjelovi´c, and Josef Sivic. End-to-end weakly-supervised semantic align- +ment. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. +6917–6925, 2018. +Simone Rossi, Pietro Michiardi, and Maurizio Filippone. Good initializations of variational bayes for +deep models. In International Conference on Machine Learning, pp. 5487–5497. PMLR, 2019. +Oleh Rybkin, Chuning Zhu, Anusha Nagabandi, Kostas Daniilidis, Igor Mordatch, and Sergey Levine. +Model-based reinforcement learning via latent-space collocation. In International Conference on +Machine Learning, pp. 9190–9201. PMLR, 2021. +Yuta Saito, Takuma Udagawa, Haruka Kiyohara, Kazuki Mogi, Yusuke Narita, and Kei Tateno. +Evaluating the robustness of off-policy evaluation. In Fifteenth ACM Conference on Recommender +Systems, pp. 
114–123, 2021. +Yassir Saquil, Da Chen, Yuan He, Chuan Li, and Yong-Liang Yang. Multiple pairwise ranking +networks for personalized video summarization. In Proceedings of the IEEE/CVF International +Conference on Computer Vision, pp. 1718–1727, 2021. +Avi Segal, Kobi Gal, Ece Kamar, Eric Horvitz, and Grant Miller. Optimizing interventions via offline +policy evaluation: Studies in citizen science. In Thirty-Second AAAI Conference on Artificial +Intelligence, 2018. +David Silver, Aja Huang, Chris J Maddison, Arthur Guez, Laurent Sifre, George Van Den Driessche, +Julian Schrittwieser, Ioannis Antonoglou, Veda Panneershelvam, Marc Lanctot, et al. Mastering +the game of go with deep neural networks and tree search. Nature, 529(7587):484–489, 2016. +Shengpu Tang and Jenna Wiens. Model selection for offline reinforcement learning: Practical +considerations for healthcare settings. In Machine Learning for Healthcare Conference, pp. 2–35. +PMLR, 2021. +Ziyang Tang, Yihao Feng, Lihong Li, Dengyong Zhou, and Qiang Liu. Doubly robust bias reduction +in infinite horizon off-policy estimation. In International Conference on Learning Representations, +2019. +Philip Thomas and Emma Brunskill. Data-efficient off-policy policy evaluation for reinforcement +learning. In International Conference on Machine Learning, pp. 2139–2148. PMLR, 2016. +Oriol Vinyals, Igor Babuschkin, Wojciech M Czarnecki, Michaël Mathieu, Andrew Dudzik, Junyoung +Chung, David H Choi, Richard Powell, Timo Ewalds, Petko Georgiev, et al. Grandmaster level in +starcraft ii using multi-agent reinforcement learning. Nature, 575(7782):350–354, 2019. +Xinlong Wang, Rufeng Zhang, Chunhua Shen, Tao Kong, and Lei Li. Dense contrastive learning +for self-supervised visual pre-training. In Proceedings of the IEEE/CVF Conference on Computer +Vision and Pattern Recognition, pp. 3024–3033, 2021. +Junfeng Wen, Bo Dai, Lihong Li, and Dale Schuurmans. Batch stationary distribution estimation. In +International Conference on Machine Learning, pp. 10203–10213. PMLR, 2020. +Mengjiao Yang, Ofir Nachum, Bo Dai, Lihong Li, and Dale Schuurmans. Off-policy evaluation via +the regularized lagrangian. Advances in Neural Information Processing Systems, 33:6551–6561, +2020. +Mengjiao Yang, Bo Dai, Ofir Nachum, George Tucker, and Dale Schuurmans. Offline policy selection +under uncertainty. In Deep RL Workshop NeurIPS 2021, 2021. +12 + +Published as a conference paper at ICLR 2023 +Yuling Yao, Aki Vehtari, Daniel Simpson, and Andrew Gelman. Yes, but did it work?: Evaluating +variational inference. In International Conference on Machine Learning, pp. 5581–5590. PMLR, +2018. +Tianhe Yu, Garrett Thomas, Lantao Yu, Stefano Ermon, James Y Zou, Sergey Levine, Chelsea Finn, +and Tengyu Ma. Mopo: Model-based offline policy optimization. Advances in Neural Information +Processing Systems, 33:14129–14142, 2020. +Tianhe Yu, Aviral Kumar, Rafael Rafailov, Aravind Rajeswaran, Sergey Levine, and Chelsea Finn. +Combo: Conservative offline model-based policy optimization. Advances in neural information +processing systems, 34:28954–28967, 2021. +Marvin Zhang, Sharad Vikram, Laura Smith, Pieter Abbeel, Matthew Johnson, and Sergey Levine. +Solar: Deep structured representations for model-based reinforcement learning. In International +Conference on Machine Learning, pp. 7444–7453. PMLR, 2019. +Michael R Zhang, Thomas Paine, Ofir Nachum, Cosmin Paduraru, George Tucker, Mohammad +Norouzi, et al. Autoregressive dynamics models for offline policy evaluation and optimization. 
In +International Conference on Learning Representations, 2020a. +Ruiyi Zhang, Bo Dai, Lihong Li, and Dale Schuurmans. Gendice: Generalized offline estimation of +stationary values. In International Conference on Learning Representations, 2020b. +Shangtong Zhang, Bo Liu, and Shimon Whiteson. Gradientdice: Rethinking generalized offline +estimation of stationary values. In International Conference on Machine Learning, pp. 11194– +11203. PMLR, 2020c. +13 + +Published as a conference paper at ICLR 2023 +Figure 9: The Gym-Mujoco and Adroit environments considered by the D4RL branch of DOPE. +A +ADDITIONAL EXPERIMENTAL DETAILS AND RESULTS +Additional Results and Discussions. +Rank correlations, regret@1 and MAEs for all 32 tasks are +documented in Tables 1- 6 below.6 The mean and standard deviation (in subscripts) over 3 random +seeds are reported. Note that in each column, performance of multiple methods may be highlighted in +bold, meaning they all achieve the best performance and do not significantly outperform each other. +The fact that VLBM outperforms the ablation baselines in most cases suggests that the RSA loss +term and branching architecture can effectively increase model expressiveness, and allow to learn the +dynamics underlying the MDP more accurately and robustly from offline data that provide limited +exploration coverage. Yet, smaller margins are attained between the VLBM and VLM+RSA in +Hopper-M-E and Hopper-M. It is likely because Hopper has relatively lower dimensional state space +compared to the other three environments, from which the underlying dynamics can be sufficiently +captured by the VLM+RSA. Fig. 10 and 11 shows the correlation between estimated (y-axis) and true +returns (x-axis) for all the OPE tasks we consider. It can be found that for Halfcheetah-R, -M-R, -M, +most of the model-based methods cannot significantly distinguish the returns across target policies. +The cause could be that the offline trajectories provided for this task are relatively more challenging, +compared to the other OPE tasks. Such an effect appears to affect IS, VPM, DICE, DR and FQE at +larger scale. It can be observed from the scatter plots reported in the DOPE benchmark (Fu et al., +2020b) that these methods could hardly tell the scale of returns across different target policies; as the +dots almost form a horizontal line in each plot. However, the estimated returns from VLBM and IS +still preserve the rank, which leads to high rank correlations and low regrets. +Implementation Details and Hyper-parameter. +The model-based methods are evaluated by di- +rectly interacting with each target policy for 50 episodes, and the mean of discounted total returns +(γ = 0.995) over all episodes is used as estimated performance for the policy. We choose the +neural network architectures as follows. For the components involving LSTMs, which include +qψ(zt|zt−1, at−1, st) and pφ(zt|zt−1, at−1), their architecture include one LSTM layer with 64 +nodes, followed by a dense layer with 64 nodes. All other components do not have LSTM layers +involved, so they are constituted by a neural network with 2 dense layers, with 128 and 64 nodes +respectively. The output layers that determine the mean and diagonal covariance of diagonal Gaussian +distributions use linear and softplus activations, respectively. The ones that determine the mean +of Bernoulli distributions (e.g., for capturing early termination of episodes) are configured to use +sigmoid activations. 
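A minimal PyTorch-style sketch of one such recurrent component is given below, using the layer sizes stated above (one 64-unit LSTM layer, a 64-unit dense layer, and linear/softplus output heads for the mean and diagonal covariance). The hidden-layer activation and the exact input wiring are our own assumptions, with the latter following (3) and (5) in the main text; this is not the authors' code.

```python
import torch
import torch.nn as nn

class RecurrentGaussianHead(nn.Module):
    """LSTM (64 units) -> dense (64 units) -> linear mean head and softplus
    diagonal-covariance head, producing a diagonal Gaussian over the latent z."""
    def __init__(self, input_dim, latent_dim=16, hidden=64):
        super().__init__()
        self.lstm = nn.LSTM(input_dim, hidden, batch_first=True)
        self.dense = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU())  # activation assumed
        self.mu = nn.Linear(hidden, latent_dim)                            # linear activation
        self.var = nn.Sequential(nn.Linear(hidden, latent_dim), nn.Softplus())

    def forward(self, x, hc=None):
        h, hc = self.lstm(x, hc)          # x: (batch, time, input_dim)
        h = self.dense(h)
        dist = torch.distributions.Normal(self.mu(h), self.var(h).sqrt())
        return dist, hc

# E.g., an encoder-like component fed with (z_{t-1}, a_{t-1}, s_t) for Halfcheetah:
enc = RecurrentGaussianHead(input_dim=16 + 6 + 17)
dist, _ = enc(torch.randn(8, 50, 39))     # batch of 8 trajectories, 50 steps
z = dist.rsample()                         # reparameterized samples, shape (8, 50, 16)
```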
VLBM and the two ablation baselines, VLM and VLM+RSA, are trained using +offline trajectories provided by DOPE, with max_iter in Alg. 1 set to 1,000 and minibatch size +set to 64. Adam optimizer is used to perform gradient descent. To determine the learning rate, we +perform grid search among {0.003, 0.001, 0.0007, 0.0005, 0.0003, 0.0001, 0.00005}. Exponential +decay is applied to the learning rate, which decays the learning rate by 0.997 every iteration. To train +VLBM, we set the constants from equation 10 following C1 = C2, and perform grid search among +6Some VPM entries are absent since they were not reported in Fu et al. (2020b), nor the code is open-sourced. +14 + +Ant +Halfcheetah +Hopper +Walker2d +Pen +Door +Hammer +RelocatePublished as a conference paper at ICLR 2023 +{5, 1, 0.1, 0.05, 0.01, 0.005, 0.001, 0.0001}. To train VLM+RSA, the constant C from equation 8 is +determined by grid search among the same set of parameters above. L2-regularization with decay +of 0.001 and batch normalization are applied to all hidden layers. Consider that some of the envi- +ronments (e.g., Ant, Hopper, Walker2d, Pen) may terminate an episode, before timeout, if the state +meets specific conditions; details for VLBM to capture such early termination behavior is introduced +in Appendix D. +The DOPE Benchmark. +The deep OPE (DOPE) benchmark (Fu et al., 2020b) provides stan- +dardized training and evaluation procedure for OPE works to follow, which facilitates fair and +comprehensive comparisons among various OPE methods. Specifically, it utilizes existing environ- +ments and training trajectories provided by D4RL7 and RLUnplugged8, which are two benchmark +suites for offline RL training, and additionally provide target policies for OPE methods to evaluate. +In the D4RL branch, the training trajectories are originally collected from various sources including +random exploration, human teleoperation, and RL-trained policies with limited exploration; thus, +can provide varied levels of coverage over the state-action space. Moreover, the target policies are +trained using online RL algorithms, which can in general lead to different state-action visitations +than in the training trajectories. We leverage the D4RL branch as our test base, since the OPE tasks +it provides are considered challenging, i.e., the limited coverage introduced by training data, as +well as the discrepancy between the behavioral and target policies. Graphical illustrations of the +Gym-Mujoco and Adroit environments considered are shown in Fig. 9. Details on the environments +and datasets used are shown in Tables 7 and 8, from the perspectives of state and action dimensions, +if episodes can be terminated before timeout, if controls are performed over continuous space, and +the size of the offline trajectories used for training. In contrast, in the RLUnplugged branch, the +training trajectories are always collected using online RL training, which can result in adequate +coverage over the state-action space. The target policies are trained by applying offline RL over the +training trajectories, so that behavioral and target policies can lead to similar state-action visitation +distributions. As discussed in DOPE (Fu et al., 2020b), such tasks are suitable for studies where ideal +data are needed, such as complexity comparisons. +Evaluation Metrics. +Following from (Fu et al., 2020b), we consider rank correlation, regret@1 +and mean absolute error (MAE) as the evaluation metrics. 
Specifically, rank correlation measures the +strength and direction of monotonic association between the rank of OPE-estimated returns and true +returns over all target policies. It is is captured by Spearsman’s correlation coefficient between the +ordinal rankings between estimated and true returns. Regret@1 is captured by the difference between +the return of the policy corresponding to the highest return as estimated by OPE and the return of the +policy that actually produces the highest true return. In other words, regret@1 evaluates how worse +the policy resulting in the highest OPE-estimated return would perform than the actual best policy. +The two metrics above evaluate how useful OPE would be to facilitate important applications such as +policy selection. Finally, we also consider MAE which is commonly used in estimation/regression +tasks. Mathematical definitions of these metrics can be found in (Fu et al., 2020b). +Implementation of AR Ensembles. +For fair comparisons with VLBM, in experiments we train +an ensemble of the state-of-the-art model-based OPE method, auto-regressive (AR) models (Zhang +et al., 2020a), as one of the baselines. Specifically, we train an ensemble of 10 AR models to learn +p(st+1, rt|st, at) following the auto-regressive manner, with each individual model following the +design introduced in (Zhang et al., 2020a), i.e., +s(j) +t+1 ∼ p(s(j) +t+1|st, at, s(1) +t+1, . . . , s(j−1) +t+1 ), +(11) +with s(j) +t+1 representing the element located at the j-th dimension of the state variable, and D the +dimension of state space. The reward is treated as an additional dimension of the states, i.e., +rt ∼ p(rt|st, at, s(1) +t+1, . . . , s(D) +t+1). However, in the original literature (Zhang et al., 2020a) it does not +introduce in details regarding which specific ensemble architecture is used (e.g., overall averaging +or weighted averaging). As a result, we choose the same weighted averaging procedure as used in +VLBM branching, to sort out the influence of different ensemble architectures and facilitate fair +comparisons. Specifically, a total of 10 AR models, parameterized by {θ1, . . . , θ10}, along with 10 +7https://github.com/rail-berkeley/d4rl +8https://github.com/deepmind/deepmind-research/tree/master/rl_unplugged +15 + +Published as a conference paper at ICLR 2023 +weight variables {wθ +1, . . . , wθ +10| � +i wθ +i = 1}, are trained. Similar to weighted averaging architecture +used in VLBM, i.e., equation 9, the mean and variance of the prediction s(j) +t+1, captured by normal +distribution N(µ, σ2), follow +µ = +�10 +i=1 wθ +i · µθi(s(j) +t+1), +σ2 = +�10 +i=1(wθ +i )2 · σ2 +θi(s(j) +t+1), +(12) +where µθi(s(j) +t+1) and σ2 +θi(s(j) +t+1) are the mean and variance produced from each individual AR model +in the ensemble. +Training Resources. +Training of the proposed method, and baselines, are facilitated by Nvidia +Quadro RTX 6000, NVIDIA RTX A5000, and NVIDIA TITAN XP GPUs. +License. +The use of DOPE9 and D4RL (Fu et al., 2020a) follow the Apache License 2.0. +9https://github.com/google-research/deep_ope +16 + +Published as a conference paper at ICLR 2023 +Figure 10: Scatter plots between OPE-estimated (y-axis) and true (x-axis) returns over all 20 Gym- +Mujoco tasks that are considered. Part 1. +17 + +Published as a conference paper at ICLR 2023 +Figure 11: Scatter plots between OPE-estimated (y-axis) and true (x-axis) returns over all 20 Gym- +Mujoco tasks that are considered. Part 2. 
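Returning to the AR ensemble defined in (11) and (12) above, the weighted combination of ensemble members can be sketched as follows (an illustrative NumPy snippet; array shapes are our own choice). It deliberately mirrors the branch combination used by the VLBM in (9), which is why we adopt the same procedure for fair comparison.

```python
import numpy as np

def combine_ar_ensemble(mus, variances, weights):
    """Weighted averaging over an AR ensemble, cf. (12): combined mean sum_i w_i * mu_i,
    combined variance sum_i w_i^2 * sigma_i^2, applied per predicted dimension."""
    mus, variances = np.asarray(mus), np.asarray(variances)     # (ensemble, ...) arrays
    w = np.asarray(weights).reshape(-1, *([1] * (mus.ndim - 1)))
    return (w * mus).sum(axis=0), ((w ** 2) * variances).sum(axis=0)

# Hypothetical ensemble of 10 models predicting one state dimension for a batch of 4:
rng = np.random.default_rng(0)
mu, var = combine_ar_ensemble(rng.normal(size=(10, 4)),
                              rng.uniform(0.1, 1.0, size=(10, 4)),
                              np.full(10, 0.1))
```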
+18 + +0000003 +29 +21 +24 +191 +324 +1 +51 +86 +59 +0Published as a conference paper at ICLR 2023 +B +MORE t-SNE VISUALIZATIONS +Figure 12: t-SNE visualization over the latent space captured by VLM, illustrating encoded state- +action visitations induced from all target policies. Each point is colored by the corresponding policy +from which it is generated. Policies in the legend are sorted in the order of increasing performance. +Figure 13: t-SNE visualization over the latent space captured by VLM+RSA(MSE), illustrating +encoded state-action visitations induced from all target policies. Each point is colored by the +corresponding policy from which it is generated. Policies in the legend are sorted in the order of +increasing performance. +Figures 12 and 13 above visualize the latent space captured by two ablation baselines, VLM and +VLM+RSA(MSE), respectively. It can be observed that comparing to the latent space captured by +VLM are not disentangled well compared to VLBM (shown in Figure 8), as the state-action pairs +induced by policies with different levels of performance are generally cluster together without explicit +boundaries. Such a finding illustrated the importance of the use of RSA loss (7) empirically, as it +can effectively regularize pψ(zt|zt−1, at−1, st) and allows the encoder to map the MDP states to +an expressive and compact latent space from which the decoder can reconstruct states and rewards +accurately. Moreover, Figure 13 shows that the latent representations of the state-action pairs captured +by VLM+RSA(MSE) distributed almost uniformly over the latent space. This justifies the rationale +provided in Sec. 2.3 where MSE is too strong to regularize the hidden states of the encoder and +decoder, and is also consistent with the results reported in Figure 3 that MSE+RSA(MSE) performs +worse than VLM in general. +19 + +0 +Lowest +Performance +6 +8 +Highest +Hopper-M-E +9 +Halfcheetah-M-E +Walker2d-M-E +Ant-M-E +Performance +10Tt +0 +Lowest +Performance +2 +w +4 +5 +6 +8 +Highest +9 +Halfcheetah-M-E +Walker2d-M-E +Ant-M-E +Hopper-M-E +10 +PerformancePublished as a conference paper at ICLR 2023 +C +ALGORITHMS FOR TRAINING AND EVALUATING VLBM +Algorithm 1: Train VLBM. +Input: Model weights ψ, φ1, . . . , φB, w1, . . . , wB, offline trajectories ρβ, and learning rate α. +Begin: +1: Initialize ψ, φ1, . . . , φB, w1, . . . , wB +2: for iter in 1 : max_iter do +3: +Sample a trajectory [(s0, a0, r0, s1), . . . , (sT −1, aT −1, rT −1, sT )] ∼ ρβ +4: +zψ +0 ∼ qψ(z0|s0) +5: +zφb +0 +∼ p(z0), for all b ∈ [1, B] +6: +Run forward pass of VLBM following (3), (5) and (9) for t = 1 : T, and collect all variables +needed to evaluate LV LBM as specified in (10). +7: +ψ ← ψ + α∇ψLV LBM +8: +for b in 1 : B do +9: +φb ← φb + α∇φbLV LBM +10: +wb ← wb + α∇wbLV LBM +11: +end for +12: end for +Algorithm 2: Evaluate VLBM. +Input: Trained model weights ψ, φ1, . . . , φB, w1, . . . 
, wB +Begin: +1: Initialize the list that stores the accumulated returns over all episodes R = [] +2: for epi in 1 : max_epi do +3: +Initialize the variable r = 0 that tracks the accumulated return for the current episode +4: +Initialize latent states from the prior, i.e., zφb +0 +∼ p(z0) for all b ∈ [1, B] +5: +Initialize LSTM hidden states hφb +0 = 0 for all b ∈ [1, B] +6: +Sample sφb +0 ∼ pφ(s0|zφb +t ) for all b ∈ [1, B] and generate initial MDP state sφ +0 following (9) +7: +for t in 1 : T do +8: +Determine the action following the target policy π, i.e., at−1 ∼ π(at−1|sφ +t−1) +9: +for b in 1 : B do +10: +Update hφb +t , ˜hφb +t , zφb +t , sφb +t , rφb +t−1 following (5). +11: +end for +12: +Generate the next state sφ +t following (9), as well as the reward rφ +t−1 ∼ +pφ(rt−1|zφ1 +t , . . . , zφB +t +) = N +� +µ = � +b wb · µ(rφb +t−1), Σdiag = � +b w2 +b · Σdiag(rφb +t−1) +� +13: +Update r ← r + γt−1rφ +t−1, with γ being the discounting factor +14: +end for +15: +Append r into R +16: end for +17: Average over all elements in R, which serves as the estimated return over π +20 + +Published as a conference paper at ICLR 2023 +D +EARLY TERMINATION OF ENVIRONMENTS +Given that some Gym-Mujoco environments, including Ant, Hopper, Walker2d and Pen, may +terminate an episode before reaching the maximum steps, if the state violates specific constraints. +Below we introduce how VLM and VLBM can be enriched to capture such early termination +behaviors. +VLM +For VLM, we introduce an additional component dφ +t ∼ pφ(dt|zφ +t ) to the generative pro- +cess equation 5, where dφ +t is a Bernoulli variable determining if an episode should be terminated at its +t-th step. Specifically, pφ(dt|zφ +t ) follows Bernoulli distribution, with mean determined by an MLP +with sigmoid activation applied to the output layer. As a result, the generative process now follows +hφ +t = fφ(hφ +t−1, zφ +t−1, at−1), +˜hφ +t = gφ(hφ +t ), +zφ +t ∼ pφ(˜hφ +t ), +sφ +t ∼ pφ(st|zφ +t ), +rφ +t−1 ∼ pφ(rt−1|zφ +t ), +dφ +t ∼ pφ(dt|zφ +t ), +at ∼ π(at|sφ +t ). +(13) +Moreover, we add in a new term to VLM’s training objective, in order to update the component +introduced above during training, i.e., +Learly_term +V LM +(ψ, φ) = LV LM(ψ, φ) + +�T +t=0 log pφ(dt|zt), +(14) +with LV LM(ψ, φ) being the original objective of VLM, as presented in equation 8. +VLBM +For VLBM, the termination of an episode is determined following, i.e., +dφ +t ∼ pφ(dt|zφ1 +t , . . . , zφB +t +) = Bernoulli(µ = +� +b +wb · µd(dφb +t )), +(15) +where µd(dφb +t ) = φMLP +b,µd (zφb +t ) is the mean of dφb +t +produced from the b-th branch of the decoder, and +φMLP +b,µd +is the corresponding MLP that maps zφb +t +to µd(dφb +t ). To update the components involved in +the procedure above, we introduce a new term to the VLBM’s objective, i.e., +Learly_term +V LBM +(ψ, φ1, . . . , φB, w1, · · · , wB) +(16) +=LV LBM(ψ, φ1, . . . , φB, w1, · · · , wB) + +�T +t=0 log pφ(dφ +t |zφ1 +t , . . . , zφB +t +), +(17) +with LV LBM being the original objective of VLBM, as presented in equation 10. 
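Putting Algorithm 2 and the termination variable above together, the OPE estimate reduces to a Monte-Carlo average of discounted returns under the target policy. A schematic sketch is shown below, where the `model` wrapper with reset()/step() is hypothetical and only summarizes the rollout logic; the 50 episodes and gamma = 0.995 follow the evaluation setup in Appendix A.

```python
import numpy as np

def estimate_return(model, policy, num_episodes=50, horizon=1000, gamma=0.995):
    """Monte-Carlo OPE estimate: roll out the learned model under the target policy
    and average the discounted returns, stopping early when the Bernoulli
    termination head signals the end of an episode."""
    returns = []
    for _ in range(num_episodes):
        s = model.reset()                  # initial state sampled via the prior p(z_0)
        ret, discount = 0.0, 1.0
        for _ in range(horizon):
            a = policy(s)                  # a_{t-1} ~ pi(. | s_{t-1})
            s, r, done = model.step(s, a)  # next state, reward and termination flag
            ret += discount * r
            discount *= gamma
            if done:                       # e.g., Hopper falling over before timeout
                break
        returns.append(ret)
    return float(np.mean(returns))
```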
+21 + +Published as a conference paper at ICLR 2023 +E +BOUND DERIVATION +We now derive the evidence lower bound (ELBO) for the joint log-likelihood distribution, i.e., +log pφ(s0:T , r0:T −1) +(18) += log +� +z1:T ∈Z +pφ(s0:T , z1:T , r0:T −1)dz +(19) += log +� +z1:T ∈Z +pφ(s0:T , z1:T , r0:T −1) +qψ(z0:T |s0:T , a0:T −1) qψ(z0:T |s0:T , a0:T −1)dz +(20) +≥Eqψ[log p(z0) + log pφ(s0:T , z1:T , r0:T −1|z0) − log qψ(z0:T |s0:T , a0:T −1)] +(21) +=Eqψ +� +log p(z0) + log pφ(s0|z0) + +�T +t=1 log pφ(st, zt, rt−1|zt−1, at−1) +− log qψ(z0|s0) − +�T +t=1 log qψ(zt|zt−1, at−1, st) +� +(22) +=Eqψ +� +log p(z0) − log qψ(z0|s0) + log pφ(s0|z0) + +�T +t=1 log +� +pφ(st|zt)pφ(rt−1|zt)pφ(zt|zt−1, at−1) +� +− +�T +t=1 log qψ(zt|zt−1, at−1, st) +� +(23) +=Eqψ +� �T +t=0 log pφ(st|zt) + +�T +t=1 log pφ(rt−1|zt) +− KL +� +qψ(z0|s0)||p(z0) +� +− +�T +t=1 KL +� +qψ(zt|zt−1, at−1, st)||pφ(zt|zt−1, at−1) +�� +. +(24) +Note that the transition from equation 20 to equation 21 follows Jensen’s inequality. +22 + +Published as a conference paper at ICLR 2023 +F +BASICS OF VARIATIONAL INFERENCE +Classic variational auto-encoders (VAEs) are designed to generate synthetic data that share similar +characteristics than the ones used for training (Kingma & Welling, 2013). Specifically, VAEs learn +an approximated posterior qψ(z|x) and a generative model pφ(x|z), over the prior p(z), with x being +the data and z the latent variable. It’s true posterior pφ(z|x) is intractable, i.e., +pφ(z|x) = pφ(x|z)p(z) +pφ(x) +; +(25) +since the marginal likelihood in the denominator, pφ(x) = +� +z pφ(x|z)p(z)dz, requires integration +over the unknown latent space. For the same reason, VAEs cannot be trained to directly maximize +the marginal log-likelihood, max log pφ(x). To resolve this, one could maximize a lower bound of +pφ(x), i.e., +max +ψ,φ −KL(qψ(z|x)||p(z)) + Eqψ[log pφ(x|z)], +(26) +which is the evidence lower bound (ELBO). +Reparameterization. +During training, it is required to sample from qψ(z|x) and pφ(x|z) constantly. +The reparameterization technique is introduced in (Kingma & Welling, 2013), to ensure that the +gradients can flow through such sampling process during back-propagation. For example, if both +distributions (qψ(z|x) and pφ(x|z)) follow diagonal Gaussians, with mean and diagonal covariance +determined by MLPs, i.e., +z ∼ qψ(z|x) = N +� +µ = ψMLP +µ +(x), +Σ = ψMLP +Σ +(x) +� +, +(27) +x ∼ pφ(x|z) = N +� +µ = φMLP +µ +(z), +Σ = φMLP +Σ +(z) +� +; +(28) +here, ψMLP +µ +, ψMLP +Σ +, φMLP +µ +, φMLP +Σ +are the MLPs that generate the means and covariances. The +sampling processes above can be captured by reparameterization, i.e., +z = ψMLP +µ +(x) + ψMLP +Σ +(x) · ϵ, +(29) +x = φMLP +µ +(z) + φMLP +Σ +(z) · ϵ, +(30) +with ϵ ∼ N(0, I). Consequently, the gradients over ψ and φ can be calculated following the chain +rule, and used for back-propagation during training. We direct readers to (Kingma & Welling, 2013) +for a comprehensive review of reparameterization. +23 + +Published as a conference paper at ICLR 2023 +G +ADDITIONAL RELATED WORKS +Overview of latent-model based RL methods. +In SLAC, latent representations are used to im- +prove the sample efficiency of model-free RL training algorithms, by jointly modeling and learning +dynamics and controls over the latent space. Similarly, SOLAR improves data efficiency for multi- +task RL by first learning high-level latent representations of the environment, which can be shared +across different tasks. 
Then, local dynamics models are inferred from the abstraction, with con- +trols solved by linear-quadratic regulators. PlaNet and Dreamer further improve the architecture +and training objectives of latent models, allowing them to look ahead multiple steps and plan for +longer horizon. There also exist LatCo which directly performs trajectory optimization over the +latent space, allowing the agent to temporarily bypass dynamical constraints and quickly navigate +to the high-reward regions in early training stage. To summarize, methods above leverage latent +representations to gain sufficient exploration coverage and quickly navigate to high-reward regions, +improving sample efficiency for policy optimization. Note that they mostly require online interactions +with the environment to formulate a growing experience replay buffer for policy learning, which have +different goals than OPE which requires learning from a fixed set of offline trajectories. +24 + +Published as a conference paper at ICLR 2023 +Rank Corr. +Ant +-E +Ant +-M-E +Ant +-M +Ant +-M-R +Ant +-R +IS +.14.41 +−.21.35 +−.17.32 +.07.39 +.26.34 +VPM +−.42.38 +−.28.28 +−.2.31 +−.26.29 +.24.31 +DICE +−.13.37 +−.33.4 +−.36.28 +−.24.39 +−.21.35 +DR +−.28.32 +.35.35 +.66.26 +.45.32 +.01.33 +FQE +−.13.32 +.37.35 +.65.25 +.57.28 +.04.33 +AR Ensemble +.40.12 +.44.25 +.56.01 +.54.16 +.48.17 +VLM +.28.14 +.39.16 +.37.03 +.37.19 +.36.07 +VLM+RSA (MSE) +.33.11 +.29.13 +.35.22 +.30.42 +.17.14 +VLM+RSA +.40.03 +.53.19 +.42.12 +.53.19 +.40.11 +VLM+RSA Ens. +.62.16 +.76.02 +.65.07 +.62.13 +0..60 +VLBM +.79.01 +.81.05 +.65.06 +.59.14 +.78.24 +Rank Corr. +Halfcheetah +-E +Halfcheetah +-M-E +Halfcheetah +-M +Halfcheetah +-M-R +Halfcheetah +-R +IS +.01.35 +−.06.37 +.80.11 +.59.26 +−.24.36 +VPM +.18.35 +−.47.29 +- +−.07.36 +.27.36 +DICE +−.44.30 +−.08.35 +−.26.07 +−.15.41 +−.70.22 +DR +.77.17 +.62.27 +.32.32 +.32.37 +−.02.38 +FQE +.78.15 +.62.27 +.34.17 +.26.37 +−.11.41 +AR Ensemble +.65.11 +.65.07 +.60.09 +.59.14 +.60.06 +VLM +.75.19 +.45.06 +.33.1 +.64.06 +.43.09 +VLM+RSA (MSE) +.54.31 +.49.03 +.6.08 +.47.11 +.13.27 +VLM+RSA +.80.17 +.54.08 +.65.21 +.61.03 +.51.08 +VLM+RSA Ens. +.71.14 +.66.08 +.64.02 +.60.05 +.45.17 +VLBM +.88.01 +.74.13 +.81.13 +.64.04 +.60.06 +Rank Corr. +Walker2d +-E +Walker2d +-M-E +Walker2d +-M +Walker2d +-M-R +Walker2d +-R +IS +.22.37 +.24.33 +−.25.35 +.65.24 +−.05.38 +VPM +.17.32 +.49.37 +.44.21 +−.52.25 +−.42.34 +DICE +−.37.27 +−.34.34 +.12.38 +.55.23 +−.19.36 +DR +.26.34 +.19.33 +.02.37 +−.37.39 +.16.29 +FQE +.35.33 +.25.32 +−.09.36 +−.19.36 +.21.31 +AR Ensemble +.54.11 +.25.33 +.55.14 +.38.17 +.36.29 +VLM +.57.13 +.16.13 +.18.30 +.39.18 +.44.18 +VLM+RSA (MSE) +.27.28 +.20.25 +.09.18 +.10.11 +.36.19 +VLM+RSA +.56.11 +.57.11 +.46.08 +.43.14 +.59.29 +VLM+RSA Ens. +.62.17 +.57.25 +.43.20 +−.14.09 +.39.14 +VLBM +.70.13 +.55.17 +.66.15 +.60.07 +.72.14 +Rank Corr. +Hopper +-E +Hopper +-M-E +Hopper +-M +Hopper +-M-R +Hopper +-R +IS +.37.27 +.35.26 +−.55.26 +−.16.03 +.23.34 +VPM +.21.32 +- +.13.37 +−.16.03 +−.46.20 +DICE +−.08.32 +.08.14 +.19.33 +.27.28 +−.13.39 +DR +−.41.27 +−.08.30 +−.31.34 +.05.17 +−.19.36 +FQE +−.33.30 +.01.08 +−.29.33 +.45.13 +−.11.36 +AR Ensemble +.23.30 +.14.29 +.53.03 +.28.18 +.26.10 +VLM +−.05.22 +.22.11 +.34.08 +.46.21 +.36.03 +VLM+RSA (MSE) +−.18.24 +.05.09 +.51.20 +.43.18 +.58.14 +VLM+RSA +.15.28 +.26.10 +.51.11 +.53.06 +.55.19 +VLM+RSA Ens. 
+.09.21 +.13.12 +−.01.3 +.66.07 +.63.16 +VLBM +.28.16 +.32.10 +.70.03 +.75.07 +.77.04 +Table 1: Rank correlation between estimated and ground-truth returns for all Gym-Mujoco tasks. +Results are obtained by averaging over 3 random seeds used for training, with standard deviations +shown in subscripts. +25 + +Published as a conference paper at ICLR 2023 +Rank Corr. +Door +human +Door +cloned +Door +expert +Pen +human +Pen +cloned +Pen +expert +IS +−.12.35 +.66.22 +.76.17 +.28.28 +.71.08 +−.45.31 +VPM +- +−.29.36 +.65.23 +- +- +.08.33 +DICE +−.02.20 +.18.31 +−.06.32 +.17.33 +−.07.26 +−.53.30 +DR +.01.18 +.60.28 +.76.13 +−.36.29 +.39.25 +.52.28 +FQE +.07.09 +.55.27 +.89.09 +−.31.21 +.06.42 +−.01.33 +AR Ens. +.58.06 +.52.13 +.61.07 +.33.07 +.42.08 +.60.09 +VLBM +.80.14 +.78.18 +.93.03 +.34.17 +.82.07 +.58.15 +Rank Corr. +Hammer +human +Hammer +cloned +Hammer +expert +Relocate +human +Relocate +cloned +Relocate +expert +IS +.39.07 +.58.27 +.64.24 +−.23.07 +−.22.18 +.52.23 +VPM +- +−.77.22 +.39.31 +- +- +.39.31 +DICE +.11.18 +.35.38 +−.42.31 +−.23.16 +.22.16 +−.27.34 +DR +−.04.25 +−.70.20 +.49.31 +.65.19 +.10.16 +−.40.24 +FQE +.14.10 +−.15.33 +.29.34 +.62.11 +.15.17 +−.57.28 +AR Ens. +.44.12 +.40.20 +.53.11 +.42.23 +.30.10 +.54.23 +VLBM +.34.14 +.58.18 +.70.20 +.68.17 +.80.04 +.58.17 +Table 2: Rank correlation between estimated and ground-truth returns for all Adroit tasks. Results +are obtained by averaging over 3 random seeds used for training, with standard deviations shown in +subscripts. +26 + +Published as a conference paper at ICLR 2023 +Regret@1 +Ant +-E +Ant +-M-E +Ant +-M +Ant +-M-R +Ant +-R +IS +.47.32 +.46.18 +.61.18 +.16.23 +.56.22 +VPM +.88.22 +.32.24 +.4.21 +.72.43 +.15.24 +DICE +.62.15 +.60.16 +.43.1 +.64.13 +.50.29 +DR +.43.22 +.37.13 +.12.18 +.05.09 +.28.15 +FQE +.43.22 +.36.14 +.12.18 +.05.09 +.28.15 +AR Ensemble +.18.09 +.17.20 +.050 +.31.20 +.03.02 +VLM +.38.24 +.07.02 +.20.25 +.08.02 +.14.16 +VLM+RSA (MSE) +.050. +.26.21 +.28.4 +.48.33 +.43.44 +VLM+RSA +.18.09 +.13.12 +.14.16 +.17.24 +.07.02 +VLM+RSA Ens. +.13.08 +.050. +.03.02 +.03.02 +.52.37 +VLBM +.050. +.050. +.050. +.11.09 +0.0. +Regret@1 +Halfcheetah +-E +Halfcheetah +-M-E +Halfcheetah +-M +Halfcheetah +-M-R +Halfcheetah +-R +IS +.15.08 +.73.42 +.05.05 +.13.10 +.31.11 +VPM +.14.09 +.80.34 +.33.19 +.25.09 +.12.07 +DICE +.32.40 +.38.37 +.82.29 +.30.07 +.81.30 +DR +.11.08 +.14.07 +.37.15 +.33.18 +.31.10 +FQE +.12.07 +.14.07 +.38.13 +.36.16 +.37.08 +AR Ensemble +.02.03 +.11.07 +.13.10 +.07.05 +.04.05 +VLM +.11.04 +.12.06 +.25.01 +.04.03 +.230. +VLM+RSA (MSE) +.09.08 +.22.09 +.20.06 +.09.08 +.27.05 +VLM+RSA +.08.02 +.17.05 +.09.12 +.02.03 +.230. +VLM+RSA Ens. +.13.05 +.19.13 +.07.09 +.02.03 +.69.44 +VLBM +.14.04 +.09.02 +0.0. +.07.09 +.15.07 +Regret@1 +Walker2d +-E +Walker2d +-M-E +Walker2d +-M +Walker2d +-M-R +Walker2d +-R +IS +.43.26 +.13.07 +.70.39 +.02.05 +.74.33 +VPM +.09.19 +.24.42 +.08.06 +.46.31 +.88.20 +DICE +.35.36 +.78.27 +.27.43 +.18.12 +.39.33 +DR +.06.07 +.30.12 +.25.09 +.68.23 +.15.20 +FQE +.06.07 +.22.14 +.31.10 +.24.20 +.15.21 +AR Ensemble +.13.11 +.17.19 +.16.15 +.14.16 +.16.02 +VLM +.10.05 +.51.25 +.30.39 +.33.38 +.08.07 +VLM+RSA (MSE) +.49.16 +.39.30 +.43.35 +.860. +.31.29 +VLM+RSA +.10.07 +.11.02 +.18.15 +.34.37 +.08.04 +VLM+RSA Ens. +.11.04 +.14.16 +.02.02 +.860. +.58.20 +VLBM +.05.04 +.05.01 +.03.04 +.14.16 +.06.06 +Regret@1 +Hopper +-E +Hopper +-M-E +Hopper +-M +Hopper +-M-R +Hopper +-R +IS +.06.03 +.10.12 +.38.28 +.880. 
+.05.05 +VPM +.13.10 +- +.10.14 +- +.26.10 +DICE +.20.08 +.16.08 +.18.19 +.16.13 +.30.15 +DR +.34.35 +.34.39 +.32.32 +.34.24 +.41.17 +FQE +.41.20 +.42.08 +.32.32 +.18.23 +.36.22 +AR Ensemble +.07.05 +.23.11 +.14.09 +.06.02 +.12.11 +VLM +.76.18 +.35.22 +.22.22 +.14.15 +.07.02 +VLM+RSA (MSE) +.42.34 +.510. +.33.39 +.26.13 +.06.04 +VLM+RSA +.62.38 +.18.23 +.13.12 +.25.15 +.33.39 +VLM+RSA Ens. +.31.18 +.510. +.47.36 +.03.02 +.06.04 +VLBM +.10.03 +.10.03 +.11.11 +.040. +.03.04 +Table 3: Regret@1 for all Gym-Mujoco tasks. Results are obtained by averaging over 3 random +seeds used for training, with standard deviations shown in subscripts. +27 + +Published as a conference paper at ICLR 2023 +Regret@1 +Door +human +Door +cloned +Door +expert +Pen +human +Pen +cloned +Pen +expert +IS +.45.40 +.02.07 +.01.04 +.17.15 +.14.09 +.31.10 +VPM +.69.24 +.81.33 +.03.03 +.28.12 +.36.18 +.25.13 +DICE +.10.27 +.65.45 +.37.27 +.04.09 +.12.08 +.33.20 +DR +.05.09 +.11.08 +.05.07 +.09.08 +.13.06 +.05.07 +FQE +.05.08 +.11.06 +.03.03 +.07.05 +.12.07 +.11.14 +AR Ens. +.08.10 +.44.31 +.10.09 +.09.08 +.14.05 +.08.07 +VLBM +.03.04 +.03.04 +.02.03 +.29.07 +.08.06 +.09.02 +Regret@1 +Hammer +human +Hammer +cloned +Hammer +expert +Relocate +human +Relocate +cloned +Relocate +expert +IS +.19.30 +.03.15 +.01.04 +.63.41 +.63.41 +.18.14 +VPM +.18.29 +.72.39 +.04.07 +.77.18 +.11.29 +.76.23 +DICE +.04.08 +.67.48 +.24.34 +.97.11 +.96.18 +.97.07 +DR +.46.23 +.78.38 +.09.09 +.17.15 +.18.27 +.98.08 +FQE +.46.23 +.36.39 +.05.04 +.17.14 +.29.42 +1.00.06 +AR Ens. +.08.06 +.05.05 +0.0. +.26.33 +.63.35 +.26.33 +VLBM +.080. +0.0. +.01.01 +.08.08 +.02.02 +.07.07 +Table 4: Regret@1 for all Adroit tasks. Results are obtained by averaging over 3 random seeds used +for training, with standard deviations shown in subscripts. +28 + +Published as a conference paper at ICLR 2023 +MAE +Ant +-E +Ant +-M-E +Ant +-M +Ant +-M-R +Ant +-R +IS +605104 +604102 +594104 +603101 +606103 +VPM +607108 +604106 +570109 +612105 +57099 +DICE +558108 +471100 +49590 +583110 +53092 +DR +584114 +32666 +34566 +42172 +404106 +FQE +583122 +31967 +34564 +41079 +398111 +AR Ensemble +55181 +62914 +57435 +6421 +57561 +VLM +33115 +31520 +31031 +4866 +6632 +VLM+RSA (MSE) +34313 +3244 +3063 +46321 +6618 +VLM+RSA +3517 +31423 +30525 +4483 +6654 +VLM+RSA Ens. +24220 +31237 +34580 +4646 +66720 +VLBM +2024 +26955 +33143 +2652 +59811 +MAE +Halfcheetah +-E +Halfcheetah +-M-E +Halfcheetah +-M +Halfcheetah +-M-R +Halfcheetah +-R +IS +1404152 +1400146 +1217123 +1409154 +1405155 +VPM +945164 +1427111 +1374153 +1384148 +1411154 +DICE +944161 +1078132 +1382130 +1440158 +1446156 +DR +102595 +1015103 +1222134 +1001129 +949126 +FQE +103195 +1014101 +1211130 +1003132 +938125 +AR Ensemble +1226222 +48024 +55364 +84664 +153716 +VLM +520242 +52649 +62453 +147827 +14901 +VLM+RSA (MSE) +469159 +42649 +68939 +143210 +14890 +VLM+RSA +414155 +44650 +622153 +147320 +14926 +VLM+RSA Ens. +25320 +773139 +1306113 +146841 +152522 +VLBM +20122 +45630 +51750 +1281170 +14952 +MAE +Walker2d +-E +Walker2d +-M-E +Walker2d +-M +Walker2d +-M-R +Walker2d +-R +IS +40562 +43662 +42860 +42760 +43061 +VPM +36768 +42561 +42660 +42464 +44058 +DICE +43760 +32260 +27331 +37451 +41957 +DR +519179 +21746 +36874 +29654 +34774 +FQE +453142 +23342 +35079 +31373 +35473 +AR Ensemble +530102 +4084 +4446 +327106 +38342 +VLM +53830 +38012 +25017 +16046 +4528 +VLM+RSA (MSE) +52130 +34020 +36119 +23614 +44315 +VLM+RSA +52241 +35886 +2539 +1257 +326161 +VLM+RSA Ens. 
+5389 +38623 +20138 +16811 +44124 +VLBM +51724 +28872 +24433 +15628 +26222 +MAE +Hopper +-E +Hopper +-M-E +Hopper +-M +Hopper +-M-R +Hopper +-R +IS +10629 +36047 +40548 +43811 +41245 +VPM +44243 +- +43344 +- +43844 +DICE +25954 +26640 +21541 +3982 +12216 +DR +42699 +23477 +30785 +29814 +28950 +FQE +28276 +25228 +28373 +2957 +26142 +AR Ensemble +36916 +29211 +39342 +47734 +45434 +VLM +14831 +13619 +21022 +1389 +38266 +VLM+RSA (MSE) +24640 +18610 +23229 +12412 +41515 +VLM+RSA +2702 +14015 +11728 +11716 +41220 +VLM+RSA Ens. +25323 +14942 +23362 +11517 +30647 +VLBM +2668 +1404 +12647 +12421 +38527 +Table 5: MAE between estimated and ground-truth returns for all Gym-Mujoco tasks. Results are +obtained by averaging over 3 random seeds used for training, with standard deviations shown in +subscripts. +29 + +Published as a conference paper at ICLR 2023 +MAE +Door +human +Door +cloned +Door +expert +Pen +human +Pen +cloned +Pen +expert +IS +870173 +891188 +648122 +3926128 +1707128 +4547222 +VPM +862163 +1040188 +879182 +1569215 +2324129 +2325136 +DICE +1108199 +69779 +856134 +4193244 +1454219 +2963279 +DR +37965 +42473 +1353218 +2846200 +132398 +2013564 +FQE +38960 +43881 +134384 +2872170 +1232105 +1057281 +AR Ens. +7343 +8267 +223616 +216112 +1981106 +1803226 +VLBM +710152 +9331 +60084 +1637286 +669270 +1002262 +MAE +Hammer +human +Hammer +cloned +Hammer +expert +Relocate +human +Relocate +cloned +Relocate +expert +IS +73521118 +74031126 +3052608 +638217 +632215 +2731147 +VPM +71051107 +74591114 +73121117 +806166 +586135 +620214 +DICE +5677936 +4169839 +3963758 +4526474 +1347485 +1095221 +DR +5768751 +6101679 +3485590 +606116 +412124 +1193350 +FQE +6000612 +5415558 +2950728 +593113 +439125 +1351393 +AR Ens. +689727 +724012 +30578 +8237 +6626 +21384 +VLBM +6184479 +7267402 +2682146 +62425 +388183 +2021270 +Table 6: MAE between estimated and ground-truth returns for all Adroit tasks. Results are obtained +by averaging over 3 random seeds used for training. +30 + +Published as a conference paper at ICLR 2023 +State Dim. +Action Dim. +Early Term. +Continuous Ctrl. +Dataset +Dataset Size +Ant +27 +8 +Yes +Yes +random +999,427 +medium- +replay +301,698 +medium +999,175 +medium- +expert +1,998,158 +expert +999,036 +Halfcheetah +17 +6 +No +Yes +random +999,000 +medium- +replay +201,798 +medium +999,000 +medium- +expert +1,998,000 +expert +999,000 +Hopper +11 +3 +Yes +Yes +random +999,999 +medium- +replay +401,598 +medium +999,998 +medium- +expert +1,998,966 +expert +999,061 +Walker2d +17 +6 +Yes +Yes +random +999,999 +medium- +replay +301,698 +medium +999,322 +medium- +expert +1,998,318 +expert +999,000 +Table 7: Summary of the Gym-Mujoco environments and datasets used to train VLBM and baselines. +State Dim. +Action Dim. +Early Term. +Continuous Ctrl. +Dataset +Dataset Size +Pen +45 +24 +Yes +Yes +human +4,975 +cloned +496,264 +expert +494,248 +Door +39 +28 +No +Yes +human +6,704 +cloned +995,642 +expert +995,000 +Hammer +46 +26 +No +Yes +human +11,285 +cloned +996,394 +expert +995,000 +Relocate +39 +30 +No +Yes +human +9,917 +cloned +996,242 +expert +995,000 +Table 8: Summary of the Adroit environments and datasets used to train VLBM and baselines. 
diff --git a/ZtFLT4oBgHgl3EQfWS9S/content/tmp_files/load_file.txt b/ZtFLT4oBgHgl3EQfWS9S/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eca096ef30f54c7fc4e9596e22a904f3bbfb957d
--- /dev/null
+++ b/ZtFLT4oBgHgl3EQfWS9S/content/tmp_files/load_file.txt
@@ -0,0 +1,2623 @@
filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf,len=2622

Published as a conference paper at ICLR 2023

VARIATIONAL LATENT BRANCHING MODEL FOR OFF-POLICY EVALUATION
Qitong Gao*, Ge Gao†, Min Chi†, Miroslav Pajic*

ABSTRACT

Model-based methods have recently shown great potential for off-policy evaluation (OPE); offline trajectories induced by behavioral policies are fitted to transitions of Markov decision processes (MDPs), which are used to roll out simulated trajectories and estimate the performance of policies. Model-based OPE methods face two key challenges. First, as offline trajectories are usually fixed, they tend to cover limited state and action space. Second, the performance of model-based methods can be sensitive to the initialization of their parameters. In this work, we propose the variational latent branching model (VLBM) to learn the transition function of MDPs by formulating the environmental dynamics as a compact latent space, from which the next states and rewards are then sampled. Specifically, VLBM leverages and extends the variational inference framework with the recurrent state alignment (RSA), which is designed to capture as much of the information underlying the limited training data as possible, by smoothing out the information flow between the variational (encoding) and generative (decoding) parts of VLBM. Moreover, we also introduce the branching architecture to improve the model's robustness against randomly initialized model weights. The effectiveness of the VLBM is evaluated on the deep OPE (DOPE) benchmark, in which the training trajectories are designed to result in varied coverage of the state-action space. We show that the VLBM outperforms existing state-of-the-art OPE methods in general.
1 INTRODUCTION

Off-policy evaluation (OPE) allows for evaluation of reinforcement learning (RL) policies without online interactions. It is applicable to many domains where on-policy data collection could be prevented due to efficiency and safety concerns, e.g., healthcare (Gao et al., 2022c;a; Tang & Wiens, 2021), recommendation systems (Mehrotra et al., 2018; Li et al., 2011), education (Mandel et al., 2014), social science (Segal et al., 2018) and optimal control (Silver et al., 2016; Vinyals et al., 2019; Gao et al., 2020a; 2019; 2020b).

Recently, as reported in the deep OPE (DOPE) benchmark (Fu et al., 2020b), model-based OPE methods, leveraging feed-forward (Fu et al., 2020b) and auto-regressive (AR) (Zhang et al., 2020a) architectures, have shown promising results toward estimating the return of target policies, by fitting transition functions of MDPs. However, model-based OPE methods remain challenged as they can only be trained using offline trajectory data, which often offers limited coverage of state and action space. Thus, they may perform sub-optimally on tasks where parts of the dynamics are not fully explored (Fu et al., 2020b). Moreover, different initialization of the model weights could lead to varied evaluation performance (Hanin & Rolnick, 2018; Rossi et al., 2019), reducing the robustness of downstream OPE estimations. Some approaches in the RL policy optimization literature use latent models trained to capture a compact space from which the dynamics underlying MDPs are extrapolated; this allows learning expressive representations over the state-action space. However, such approaches usually require online data collection, as the focus is on quickly navigating to the high-reward regions (Rybkin et al., 2021), as well as on improving coverage of the explored state and action space (Zhang et al., 2019; Hafner et al., 2019; 2020a) or sample efficiency (Lee et al., 2020).
In this work, we propose the variational latent branching model (VLBM), aiming to learn a compact and disentangled latent representation space from offline trajectories, which can better capture the dynamics underlying environments. VLBM enriches the architectures and optimization objectives for existing latent modeling frameworks, allowing them to learn from a fixed set of offline trajectories. Specifically, VLBM considers learning variational (encoding) and generative (decoding) distributions, both represented by long short-term memories (LSTMs) with reparameterization (Kingma & Welling, 2013), to encode the state-action pairs and enforce the transitions over the latent space, respectively. To train such models, we optimize over the evidence lower bound (ELBO) jointly with a recurrent state alignment (RSA) term defined over the LSTM states; this ensures that the information encoded into the latent space can be effectively teased out by the decoder. Then, we introduce the branching architecture that allows for multiple decoders to jointly infer from the latent space and reach a consensus, from which the next state and reward are generated. This is designed to mitigate the side effects of model-based methods where different weight initializations could lead to varied performance (Fu et al., 2020b; Hanin & Rolnick, 2018; Rossi et al., 2019).

We focus on using the VLBM to facilitate OPE since it allows us to better distinguish the improvements made upon learning the dynamics underlying the MDP used for estimating policy returns, as opposed to RL training, where performance can be affected by multiple factors, e.g., techniques used for exploration and policy optimization. Moreover, model-based OPE methods are helpful for evaluating the safety and efficacy of RL-based controllers before deployment in the real world (Gao et al., 2022b), e.g., how a surgical robot would react to states that are critical to a successful procedure.

* Duke University, USA. Contact: {qitong.gao, miroslav.pajic}@duke.edu.
† North Carolina State University, USA.
Code available at https://github.com/gaoqitong/vlbm.

The key contributions of this paper are summarized as follows: (i) to the best of our knowledge, the VLBM is the first method that leverages variational inference for OPE. It can be trained using offline trajectories and capture environment dynamics over latent space, as well as estimate returns of target (evaluation) policies accurately. (ii) The design of the RSA loss term and branching architecture can effectively smooth the information flow in the latent space shared by the encoder and decoder, increasing the expressiveness and robustness of the model.
This is empirically shown in experiments by comparing with ablation baselines. (iii) Our method generally outperforms existing model-based and model-free OPE methods for evaluating policies over various D4RL environments (Fu et al., 2020a). Specifically, we follow guidelines provided by the DOPE benchmark (Fu et al., 2020b), which contains challenging OPE tasks where the training trajectories include varying levels of coverage of the state-action space, and target policies are designed toward resulting in state-action distributions different from the ones induced by behavioral policies.

2 VARIATIONAL LATENT BRANCHING MODEL

In this section, we first introduce the objective of OPE and the variational latent model (VLM) we consider. Then, we propose the recurrent state alignment (RSA) term as well as the branching architecture that constitute the variational latent branching model (VLBM).

2.1 OPE OBJECTIVE

We first introduce the MDP used to characterize the environment. Specifically, an MDP can be defined as a tuple M = (S, A, P, R, s_0, γ), where S is the set of states, A the set of actions, P : S × A → S is the transition distribution usually captured by probabilities p(s_t|s_{t−1}, a_{t−1}), R : S × A → R is the reward function, s_0 is the initial state sampled from the initial state distribution p(s_0), and γ ∈ [0, 1) is the discounting factor. Finally, the agent interacts with the MDP following some policy π(a|s) which defines the probabilities of taking action a at state s. Then, the goal of OPE can be formulated as follows. Given trajectories collected by a behavioral policy β,
ρ^β = {[(s_0, a_0, r_0, s_1), ..., (s_{T−1}, a_{T−1}, r_{T−1}, s_T)]^(0), [(s_0, a_0, r_0, s_1), ...]^(1), ... | a_t ∼ β(a_t|s_t)}¹,
estimate the expected total return over the unknown state-action visitation distribution ρ^π of the target (evaluation) policy π – i.e., for T being the horizon,

    E_{(s,a)∼ρ^π, r∼R} [ Σ_{t=0}^{T} γ^t R(s_t, a_t) ].   (1)

¹ We slightly abuse the notation ρ^β, to represent either the trajectories or the state-action visitation distribution under the behavioral policy, depending on the context.
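For concreteness, a model-based OPE estimator approximates (1) by rolling out the target policy inside a learned model, which serves as a synthetic environment, and averaging the discounted returns of the simulated trajectories. The sketch below is illustrative only and is not taken from the released VLBM code; model.reset(), model.step() and policy are hypothetical interfaces standing in for the learned initial-state/transition/reward models and for the target policy, and the horizon and discount values are placeholders.

    import numpy as np

    def estimate_return(model, policy, num_rollouts=50, horizon=1000, gamma=0.99):
        # Monte-Carlo estimate of E[sum_t gamma^t R(s_t, a_t)] under the target policy,
        # using a learned model as a synthetic environment (hypothetical interface).
        returns = []
        for _ in range(num_rollouts):
            s = model.reset()              # sample s_0 from the learned initial-state model
            total, discount = 0.0, 1.0
            for _ in range(horizon):
                a = policy(s)              # a_t ~ pi(a_t | s_t)
                s, r = model.step(a)       # sample s_{t+1} and r_t from the learned model
                total += discount * r
                discount *= gamma
            returns.append(total)
        return float(np.mean(returns))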
2.2 VARIATIONAL LATENT MODEL

We consider the VLM consisting of a prior p(z) over the latent variables z ∈ Z ⊂ R^l, with Z representing the latent space and l the dimension, along with a variational encoder qψ(z_t|z_{t−1}, a_{t−1}, s_t) and a generative decoder pφ(z_t, s_t, r_{t−1}|z_{t−1}, a_{t−1}), parameterized by ψ and φ respectively. Basics of variational inference are introduced in Appendix F.

Latent Prior p(z_0). The prior specifies the distribution from which the latent variable of the initial stage, z_0, is sampled. We configure p(z_0) to follow a Gaussian with zero mean and identity covariance matrix, which is a common choice under the variational inference framework (Kingma & Welling, 2013; Lee et al., 2020).

Figure 1: Architecture of variational latent model (VLM) we consider.

Variational Encoder for Inference qψ(z_t|z_{t−1}, a_{t−1}, s_t). The encoder is used to approximate the intractable posterior,
p(z_t|z_{t−1}, a_{t−1}, s_t) = p(z_{t−1}, a_{t−1}, z_t, s_t) / ∫_{z_t∈Z} p(z_{t−1}, a_{t−1}, z_t, s_t) dz_t,
where the denominator requires integrating over the unknown latent space.
Specifically, the encoder can be decomposed into two parts, given that

    qψ(z_{0:T}|s_{0:T}, a_{0:T−1}) = qψ(z_0|s_0) Π_{t=1}^{T} qψ(z_t|z_{t−1}, a_{t−1}, s_t);   (2)

here, qψ(z_0|s_0) encodes the initial state s_0 into the corresponding latent variable z_0; then, qψ(z_t|z_{t−1}, a_{t−1}, s_t) enforces the transition from z_{t−1} to z_t conditioned on a_{t−1} and s_t. Both distributions are diagonal Gaussians², with means and diagonals of covariance matrices determined by a multi-layered perceptron (MLP) (Bishop, 2006) and a long short-term memory (LSTM) (Hochreiter & Schmidhuber, 1997) respectively. The weights for both neural networks are referred to as ψ in general. Consequently, the inference process for z_t can be summarized as

    z_0^ψ ∼ qψ(z_0|s_0),   h_t^ψ = fψ(h_{t−1}^ψ, z_{t−1}^ψ, a_{t−1}, s_t),   z_t^ψ ∼ qψ(z_t|h_t^ψ),   (3)

where fψ represents the LSTM layer and h_t^ψ the LSTM recurrent (hidden) state. Note that we use ψ in superscripts to distinguish the variables involved in this inference process from those of the generative process introduced below. Moreover, reparameterization can be used to sample z_0^ψ and z_t^ψ, such that gradients of sampling can be back-propagated, as introduced in (Kingma & Welling, 2013). An overview of the inference and generative processes is illustrated in Fig. 1.
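As an illustration of the inference path in (2)-(3), a minimal PyTorch-style sketch is given below; the module names, layer sizes and the use of a single LSTM cell per time step are assumptions made for exposition and do not reproduce the exact architecture or hyperparameters of the paper.

    import torch
    import torch.nn as nn

    class Encoder(nn.Module):
        # q_psi: encodes s_0 into z_0, then rolls (z_{t-1}, a_{t-1}, s_t) -> z_t via an LSTM.
        def __init__(self, s_dim, a_dim, z_dim=16, h_dim=64):
            super().__init__()
            self.init_head = nn.Linear(s_dim, 2 * z_dim)          # MLP head for q_psi(z_0 | s_0)
            self.rnn = nn.LSTMCell(z_dim + a_dim + s_dim, h_dim)  # f_psi
            self.head = nn.Linear(h_dim, 2 * z_dim)               # q_psi(z_t | h_t^psi)

        @staticmethod
        def sample(stats):
            # split into mean / log-variance and apply the reparameterization trick
            mu, log_var = stats.chunk(2, dim=-1)
            return mu + torch.randn_like(mu) * (0.5 * log_var).exp()

        def encode_initial(self, s0):
            return self.sample(self.init_head(s0))                # z_0^psi

        def step(self, hc, z_prev, a_prev, s_t):
            h, c = self.rnn(torch.cat([z_prev, a_prev, s_t], dim=-1), hc)
            z_t = self.sample(self.head(h))
            return (h, c), z_t                                    # recurrent state h_t^psi and sample z_t^psi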
Generative Decoder for Sampling pφ(z_t, s_t, r_{t−1}|z_{t−1}, a_{t−1}). The decoder is used to interact with the target policies and acts as a synthetic environment during policy evaluation, from which the expected returns can be estimated as the mean return of simulated trajectories. The decoder can be represented by the multiplication of three diagonal Gaussian distributions, given that

    pφ(z_{1:T}, s_{0:T}, r_{0:T−1}|z_0, π) = Π_{t=0}^{T} pφ(s_t|z_t) Π_{t=1}^{T} pφ(z_t|z_{t−1}, a_{t−1}) pφ(r_{t−1}|z_t),   (4)

with a_t ∼ π(a_t|s_t) at each time step. Specifically, pφ(z_t|z_{t−1}, a_{t−1}) has its mean and covariance determined by an LSTM, enforcing the transition from z_{t−1} to z_t in the latent space given action a_{t−1}. In what follows, pφ(s_t|z_t) and pφ(r_{t−1}|z_t) generate the current state s_t and reward r_{t−1} given z_t, whose means and covariances are determined by MLPs. As a result, the generative process starts with sampling the initial latent variable from the latent prior, i.e., z_0^φ ∼ p(z_0). Then, the initial state s_0^φ ∼ pφ(s_0|z_0^φ) and action a_0 ∼ π(a_0|s_0^φ) are obtained from pφ and the target policy π, respectively; the rest of the generative process can be summarized as

    h_t^φ = fφ(h_{t−1}^φ, z_{t−1}^φ, a_{t−1}),   h̃_t^φ = gφ(h_t^φ),   z_t^φ ∼ pφ(h̃_t^φ),   s_t^φ ∼ pφ(s_t|z_t^φ),   r_{t−1}^φ ∼ pφ(r_{t−1}|z_t^φ),   a_t ∼ π(a_t|s_t^φ),   (5)

where fφ is the LSTM layer producing the recurrent state h_t^φ. Then, an MLP gφ is used to generate a mapping between h_t^φ and h̃_t^φ that will be used for the recurrent state alignment (RSA) introduced below, to augment the information flow between the inference and generative processes.

² Assume that different dimensions of the states are non-correlated with each other. Otherwise, the states can be projected to an orthogonal basis, such that non-diagonal elements of the covariance matrix will be zeros.

Figure 2: (Left) Recurrent state alignment (RSA) applied over the recurrent hidden states between inference and generative process, illustrated separately. (Right) Single-step forward pass of the variational latent branching model (VLBM), the training objectives for each branch and the final predictions.
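A corresponding sketch of one generative step in (5), again PyTorch-style and purely illustrative (f_phi, g_phi and the Gaussian heads mirror the notation above, while module names and sizes are assumed):

    import torch
    import torch.nn as nn

    class Decoder(nn.Module):
        # p_phi: latent transition via LSTM f_phi, plus Gaussian heads for z_t, s_t and r_{t-1}.
        def __init__(self, s_dim, a_dim, z_dim=16, h_dim=64):
            super().__init__()
            self.rnn = nn.LSTMCell(z_dim + a_dim, h_dim)  # f_phi
            self.g = nn.Linear(h_dim, h_dim)              # g_phi: maps h_t^phi to h~_t^phi (used by RSA below)
            self.z_head = nn.Linear(h_dim, 2 * z_dim)     # p_phi(z_t | z_{t-1}, a_{t-1})
            self.s_head = nn.Linear(z_dim, 2 * s_dim)     # p_phi(s_t | z_t)
            self.r_head = nn.Linear(z_dim, 2)             # p_phi(r_{t-1} | z_t), mean and log-variance

        @staticmethod
        def sample(stats):
            mu, log_var = stats.chunk(2, dim=-1)
            return mu + torch.randn_like(mu) * (0.5 * log_var).exp()

        def step(self, hc, z_prev, a_prev):
            h, c = self.rnn(torch.cat([z_prev, a_prev], dim=-1), hc)
            h_tilde = self.g(h)
            z_t = self.sample(self.z_head(h_tilde))
            s_t = self.sample(self.s_head(z_t))
            r_prev = self.sample(self.r_head(z_t))
            return (h, c), h_tilde, z_t, s_t, r_prev

During evaluation, an action would be drawn from the target policy after each such step, so that the decoder and policy together roll out full trajectories as in the OPE sketch after Eq. (1).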
Furthermore, to train the elements in the encoder (3) and decoder (5), one can maximize the evidence lower bound (ELBO), a lower bound of the joint log-likelihood p(s_{0:T}, r_{0:T−1}), following

    L_ELBO(ψ, φ) = E_{qψ}[ Σ_{t=0}^{T} log pφ(s_t|z_t) + Σ_{t=1}^{T} log pφ(r_{t−1}|z_t) − KL(qψ(z_0|s_0) || p(z_0)) − Σ_{t=1}^{T} KL(qψ(z_t|z_{t−1}, a_{t−1}, s_t) || pφ(z_t|z_{t−1}, a_{t−1})) ];   (6)

here, the first two terms represent the log-likelihood of reconstructing the states and rewards, and the last two terms regularize the approximated posterior. The proof can be found in Appendix E.
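For illustration, the terms of (6) can be assembled from per-step Gaussian distributions as in the sketch below, assuming the encoder and decoder expose torch.distributions Normal objects for each time step; this is a schematic helper under those assumptions, not the paper's implementation.

    from torch.distributions import kl_divergence

    def elbo_terms(s_dists, r_dists, s_true, r_true, q_dists, p_dists):
        # s_dists / r_dists: Normal distributions p_phi(s_t|z_t) for t=0..T and p_phi(r_{t-1}|z_t) for t=1..T;
        # q_dists / p_dists: matching pairs, i.e. (q_psi(z_0|s_0), p(z_0)) followed by
        # (q_psi(z_t|z_{t-1},a_{t-1},s_t), p_phi(z_t|z_{t-1},a_{t-1})) for t=1..T.
        recon = sum(d.log_prob(x).sum() for d, x in zip(s_dists, s_true)) \
              + sum(d.log_prob(x).sum() for d, x in zip(r_dists, r_true))
        kl = sum(kl_divergence(q, p).sum() for q, p in zip(q_dists, p_dists))
        return recon - kl   # the quantity inside the expectation of Eq. (6) for one trajectory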
2.3 RECURRENT STATE ALIGNMENT

The latent model discussed above is somewhat reminiscent of the ones used in model-based RL policy training methods, e.g., the recurrent state space model (RSSM) used in PlaNet (Hafner et al., 2019) and Dreamer (Hafner et al., 2020a;b), as well as similar ones in Lee et al. (2020); Lu et al. (2022). Such methods rely on a growing experience buffer for training, which is collected online by the target policy that is being concurrently updated (with exploration noise added); however, OPE aims to extrapolate returns from a fixed set of offline trajectories, which may result in limited coverage of the state and action space. Consequently, directly applying the VLM for OPE can lead to subpar performance empirically; see results in Sec. 3. Moreover, the encoder above plays a key role of capturing the temporal transitions between latent variables, i.e., qψ(z_t|z_{t−1}, a_{t−1}, s_t) from (2). However, it is absent in the generative process, as the decoder leverages a separate network to determine the latent transitions, i.e., pφ(z_t|z_{t−1}, a_{t−1}). Moreover, from the ELBO (6) above it can be seen that only the KL-divergence terms are used to regularize these two parts, which may not be sufficient for OPE as limited offline trajectories are provided. As a result, we introduce the RSA term as part of the training objective, to further regularize qψ(z_t|z_{t−1}, a_{t−1}, s_t) and pφ(z_t|z_{t−1}, a_{t−1}). A graphical illustration of RSA can be found in Fig. 2.³

Specifically, RSA is defined as the mean pairwise squared error between h_t^ψ from the encoder (3) and h̃_t^φ from the decoder (5), i.e.,

    L_RSA(h̃_t^φ, h_t^ψ; ψ, φ) = (1/N) Σ_{i=1}^{N} Σ_{t=0}^{T} [2 / (M(M−1))] Σ_{j=1}^{M−1} Σ_{k=j+1}^{M} ( (h̃_t^φ[j] − h̃_t^φ[k]) − (h_t^ψ[j] − h_t^ψ[k]) )²;   (7)

here, we assume that both LSTM recurrent states have the same dimension, h̃_t^φ, h_t^ψ ∈ R^M, with h_t^(·)[j] referring to the j-th element of the recurrent state, and N the number of training trajectories. Here, we choose the pairwise squared loss over the classic mean squared error (MSE), because MSE could be too strong a regularizer for h_t^ψ and h̃_t^φ, which support the inference and generative processes respectively and are not supposed to be exactly the same.

³ Rewards and actions are omitted for conciseness of the presentation.
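A vectorized sketch of the pairwise term in (7), assuming the encoder and decoder recurrent states are stacked into tensors of shape (N, T, M); the normalization is folded into a single mean, so it matches (7) only up to constant factors.

    import torch

    def rsa_loss(h_dec, h_enc):
        # h_dec: decoder states h~^phi, h_enc: encoder states h^psi, both of shape (N, T, M).
        d_dec = h_dec.unsqueeze(-1) - h_dec.unsqueeze(-2)   # (N, T, M, M) pairwise element differences
        d_enc = h_enc.unsqueeze(-1) - h_enc.unsqueeze(-2)
        sq = (d_dec - d_enc).pow(2)
        iu = torch.triu_indices(h_dec.size(-1), h_dec.size(-1), offset=1)
        return sq[..., iu[0], iu[1]].mean()                 # average over trajectories, time and j < k pairs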
In contrast, the pairwise loss (7) can promote structural similarity between the LSTM recurrent states of the encoder and decoder, without strictly enforcing them to become the same. Note that this design choice has been justified in Sec. 3 through an ablation study by comparing against models trained with MSE. In general, the pairwise loss has also been adopted in many domains for similar purposes, e.g., object detection (Gould et al., 2009; Rocco et al., 2018), ranking systems (Doughty et al., 2018; Saquil et al., 2021) and contrastive learning (Wang et al., 2021; Chen et al., 2020). Similarly, we apply the pairwise loss over h_t^ψ and h̃_t^φ, instead of directly over h_t^ψ and h_t^φ, as the mapping gφ (from equation 5) could serve as a regularization layer to ensure optimality over L_RSA without changing h_t^ψ, h_t^φ significantly. As a result, the objective for training the VLM, following architectures specified in (3) and (5), can be formulated as

    max_{ψ,φ} L_VLM(ψ, φ) = max_{ψ,φ} [ L_ELBO(ψ, φ) − C · L_RSA(h̃_t^φ, h_t^ψ; ψ, φ) ],   (8)

with C > 0, C ∈ R, being the constant balancing the scale of the ELBO and RSA terms.
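Putting (6) and (7) together as in (8), a single optimization step could then be organized as in the following sketch, reusing the illustrative elbo_terms and rsa_loss helpers above; C is a tuning constant, and the sign flip turns the maximization into a standard gradient-descent loss.

    def vlm_training_step(optimizer, elbo, rsa, C=1.0):
        # elbo and rsa are scalar tensors computed by the helpers above for a minibatch of trajectories.
        loss = -(elbo - C * rsa)      # maximize L_ELBO - C * L_RSA by minimizing its negative
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        return loss.item()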
2.4 BRANCHING FOR GENERATIVE DECODER

The performance of model-based methods can vary upon different design factors (Fu et al., 2020b; Hanin & Rolnick, 2018). Specifically, Rossi et al. (2019) found that the convergence speed and optimality of variational models are sensitive to the choice of weight initialization techniques. Moreover, under the typical variational inference setup followed by the VLM above, the latent transitions reconstructed by the decoder, pφ(z_t|z_{t−1}, a_{t−1}), are only trained through the regularization losses in (6) and (7), but are fully responsible for rolling out trajectories during evaluation. Consequently, in this sub-section we introduce the branching architecture for the decoder, with the goal of minimizing the impact brought by random weight initialization of the networks, and allowing the decoder to best reconstruct the latent transitions pφ(z_t|z_{t−1}, a_{t−1}) as well as the s_t's and r_{t−1}'s correctly. Specifically, the branching architecture leverages an ensemble of B ∈ Z+ decoders to tease out information from the latent space formulated by the encoder, with final predictions sampled from a mixture of the Gaussian output distributions from (5). Note that the classic setup of ensembles, i.e., training and averaging over B VLMs end-to-end, is not considered, because in this case B different latent spaces exist, each of which is still associated with a single decoder, leaving the challenges above unresolved. This design choice is justified by ablation studies in Sec. 3, by comparing VLBM against a (classic) ensemble of VLMs.

Branching Architecture. Consider the generative process involving B branches of the decoders parameterized by {φ_1, ..., φ_B}. The forward architecture over a single step is illustrated in Fig. 2.⁴ Specifically, the procedure of sampling z_t^{φ_b} and s_t^{φ_b} for each b ∈ [1, B] follows from (5). Recall that by definition pφ_b(s_t|z_t^{φ_b}) follows a multivariate Gaussian with mean and diagonal of covariance matrix determined by the corresponding MLPs, i.e., µ(s_t^{φ_b}) = φ_{b,µ}^{MLP}(z_t^{φ_b}) and Σ_diag(s_t^{φ_b}) = φ_{b,Σ}^{MLP}(z_t^{φ_b}). In what follows, the final outcome s_t^φ can be sampled following a diagonal Gaussian with mean and variance determined by weighted averaging across all branches using weights w_b's, i.e.,

    s_t^φ ∼ pφ(s_t|z_t^{φ_1}, ..., z_t^{φ_B}) = N( µ = Σ_b w_b · µ(s_t^{φ_b}),  Σ_diag = Σ_b w_b² · Σ_diag(s_t^{φ_b}) ).   (9)

The objective below can be used to jointly update the w_b's, ψ and φ_b's, i.e.,

    max_{ψ,φ,w} L_VLBM(ψ, φ_1, ..., φ_B, w_1, ..., w_B) = max_{ψ,φ,w} [ Σ_{t=0}^{T} log pφ(s_t^φ|z_t^{φ_1}, ..., z_t^{φ_B}) − C_1 · Σ_b L_RSA(h̃_t^{φ_b}, h_t^ψ; ψ, φ_b) + C_2 · Σ_b L_ELBO(ψ, φ_b) ],
    s.t. w_1, ..., w_B > 0, Σ_b w_b = 1, and constants C_1, C_2 > 0.   (10)

Though the first term above already propagates through all w_b's and φ_b's, the third term and the constraints over the w_b's regularize φ_b in each individual branch such that they are all trained toward maximizing the likelihood pφ_b(s_t^{φ_b}|z_t^{φ_b}). Pseudo-code for training and evaluating the VLBM can be found in Appendix C. Further, in practice, one can define w_b = v_b² / (ϵ + Σ_b v_b²), with v_b ∈ R the learnable variables and 0 < ϵ ≪ 1, ϵ ∈ R, a constant ensuring the denominator is greater than zero, to convert (10) into unconstrained optimization and solve it using gradient descent. Lastly, note that complementary latent modeling methods, e.g., latent overshooting from Hafner et al. (2019), could be adopted in (10). However, we keep the objective straightforward, so that the source of performance improvements can be isolated.

⁴ For simplicity, the parts generating rewards are omitted without loss of generality.
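The weighted combination in (9), together with the unconstrained parameterization w_b = v_b² / (ϵ + Σ_b v_b²) described above, can be sketched as follows (PyTorch-style; module and argument names are illustrative, and each branch is assumed to produce the mean and diagonal variance of its state head, e.g. via decoders like the earlier sketch).

    import torch
    import torch.nn as nn

    class BranchMixer(nn.Module):
        # Combines B per-branch Gaussian heads as in Eq. (9): the final state distribution has
        # mean sum_b w_b * mu_b and diagonal variance sum_b w_b^2 * var_b, with weights
        # parameterized so that the simplex constraint of Eq. (10) is handled by plain gradient descent.
        def __init__(self, num_branches, eps=1e-6):
            super().__init__()
            self.v = nn.Parameter(torch.ones(num_branches))
            self.eps = eps

        def weights(self):
            v2 = self.v.pow(2)
            return v2 / (self.eps + v2.sum())               # w_b = v_b^2 / (eps + sum_b v_b^2)

        def forward(self, mus, vars_):
            # mus, vars_: per-branch means and diagonal variances stacked to shape (B, ..., s_dim)
            w = self.weights().view(-1, *([1] * (mus.dim() - 1)))
            mu = (w * mus).sum(dim=0)
            var = (w.pow(2) * vars_).sum(dim=0)
            return mu + var.sqrt() * torch.randn_like(mu)   # sample s_t^phi from the combined Gaussian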
3 EXPERIMENTS

Figure 3: Mean rank correlation, regret@1 and MAE over all the 32 Gym-Mujoco and Adroit tasks, showing that VLBM achieves state-of-the-art performance overall.

To evaluate the VLBM, we follow the guidelines from the deep OPE (DOPE) benchmark (Fu et al., 2020b). Specifically, we follow the D4RL branch in DOPE and use the Gym-Mujoco and Adroit suites as the test base (Fu et al., 2020a). Such environments have long horizons and high-dimensional state and action spaces, which are usually challenging for model-based methods. The offline trajectories provided for training are collected using behavioral policies of varied quality and provenance, including limited exploration, human teleoperation, etc., which can result in different levels of coverage over the state-action space. Also, the target (evaluation) policies are generated using online RL training, aiming to reduce the similarity between behavioral and target policies; this introduces another challenge, namely that during evaluation the agent may visit states unseen in the training trajectories.

Environmental and Training Setup. A total of 8 environments are provided by the Gym-Mujoco and Adroit suites (Fu et al., 2020b;a). Moreover, each environment is provided with 5 (for Gym-Mujoco) or 3 (for Adroit) training datasets collected using different behavioral policies, resulting in a total of 32 env-dataset tasks (see Footnote 5) – a full list can be found in Appendix A.
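As a purely illustrative note on the setup (not part of the paper), the offline trajectories for these env-dataset tasks are distributed through the D4RL package; one such combination can typically be loaded as follows, though the exact dataset id and version may differ:

    import gym
    import d4rl  # noqa: F401 -- registers the offline Gym-Mujoco and Adroit environments

    # Example env-dataset combination: Hopper with the medium-replay behavioral data.
    env = gym.make('hopper-medium-replay-v0')
    dataset = env.get_dataset()  # dict with 'observations', 'actions', 'rewards', 'terminals', ...
    print(dataset['observations'].shape, dataset['actions'].shape)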
DOPE also provides 11 target policies for each environment, whose performance is to be evaluated by the OPE methods. They in general result in varied scales of returns, as shown in the x-axes of Fig. 7. Moreover, we consider the decoder to have B = 10 branches, i.e., {p_{φ_1}, …, p_{φ_10}}. The dimension of the latent space is set to 16, i.e., z ∈ Z ⊂ R^16. Other implementation details can be found in Appendix A.

Baselines and Evaluation Metrics. In addition to the five baselines reported from DOPE, i.e., importance sampling (IS) (Precup, 2000), doubly robust (DR) (Thomas & Brunskill, 2016), the variational power method (VPM) (Wen et al., 2020), distribution correction estimation (DICE) (Yang et al., 2020), and fitted Q-evaluation (FQE) (Le et al.,
2019), the effectiveness of VLBM is also compared against the state-of-the-art model-based OPE method leveraging the auto-regressive (AR) architecture (Zhang et al., 2020a). Specifically, for each task we train an ensemble of 10 AR models, for a fair comparison against VLBM, which leverages the branching architecture; see Appendix A for details of the AR ensemble setup. Following the DOPE benchmark (Fu et al., 2020b), our evaluation metrics include rank correlation, regret@1, and mean absolute error (MAE). VLBM and all baselines are trained using 3 different random seeds over each task, leading to the results reported below.

Ablation. Four ablation baselines are also considered, i.e., VLM, VLM+RSA, VLM+RSA(MSE) and VLM+RSA Ensemble. Specifically, VLM refers to the model introduced in Sec. 2.2, trained toward maximizing only the ELBO, i.e., (6). Note that, arguably, VLM could be seen as a generalization of directly applying the latent models proposed in the existing RL policy optimization literature (Lee et al.,
2020; Hafner et al., 2019; 2020a;b; Lu et al., 2022); details can be found in Sec. 4 below. The VLM+RSA ablation baseline follows the same model architecture as VLM, but is trained to optimize over both the ELBO and recurrent state alignment (RSA) as introduced in (8); i.e., branching is not used, in contrast to VLBM. The design of these two baselines helps analyze the effectiveness of the RSA loss term and the branching architecture introduced in Sec. 2.3 and 2.4, respectively.

Footnote 5: From now on the dataset names are abbreviated by their initials, e.g., Ant-M-R refers to Ant-Medium-Replay.

Figure 4: Mean rank correlation, regret@1 and MAE over all datasets, for each Mujoco environment.

Figure 5: Mean rank correlation, regret@1 and MAE over all datasets, for each Adroit environment.
Moreover, VLM+RSA(MSE) uses the mean squared error in place of the pairwise loss introduced in (7), and VLM+RSA Ensemble applies a classic ensemble by averaging over B VLM+RSA models end-to-end, instead of branching from the decoder as in VLBM. These two ablation baselines help justify the use of the pairwise loss for RSA, and the benefit of the branching architecture over classic ensembles.

Figure 6: Distribution of all branching weights, w_b's, over all VLBMs trained on the 32 tasks.

Results. Fig. 3 shows the mean overall performance attained by VLBM and the baselines over all 32 Gym-Mujoco and Adroit tasks. In general, VLBM leads to significantly increased rank correlations and decreased regret@1's over existing methods, with MAEs maintained at the state-of-the-art level. Specifically, VLBM achieves state-of-the-art performance in 31, 29, and 15 (out of 32) tasks in terms of rank correlation, regret@1 and MAE, respectively. Performance for each task can be found in Tables 1-6 at the end of the Appendices. Note that results for IS, VPM, DICE, DR, and FQE are obtained directly from the DOPE benchmark (Fu et al., 2020b), since the same experimental setup is considered. Fig. 4 and 5 visualize the mean performance for each Gym-Mujoco and Adroit environment, respectively, over all the associated datasets. It can also be observed that the model-based and FQE baselines generally perform better than the other baselines, which is consistent with findings from DOPE.
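For clarity, the three DOPE metrics reported here can be computed from the true returns of the target policies and the corresponding OPE estimates roughly as in the sketch below (a simplified illustration, not the benchmark's reference implementation; the benchmark may additionally normalize these quantities):

    import numpy as np
    from scipy.stats import spearmanr

    def ope_metrics(true_returns, estimated_returns):
        """Rank correlation, regret@1 and MAE over one environment's target policies."""
        true_returns = np.asarray(true_returns, dtype=float)
        estimated_returns = np.asarray(estimated_returns, dtype=float)

        rank_corr, _ = spearmanr(true_returns, estimated_returns)

        # regret@1: how much true return is lost by deploying the policy that the
        # OPE method ranks first, instead of the truly best one.
        picked = int(np.argmax(estimated_returns))
        regret_at_1 = true_returns.max() - true_returns[picked]

        mae = float(np.abs(true_returns - estimated_returns).mean())
        return rank_corr, regret_at_1, mae

    # Example with 11 target policies (values are made up for illustration).
    true = np.linspace(100, 1500, 11)
    est = true + np.random.default_rng(0).normal(scale=200, size=11)
    print(ope_metrics(true, est))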
The fact that VLM+RSA outperforms the VLM ablation baseline, as shown in Fig. 4, illustrates the need for the RSA loss term to smooth the flow of information between the encoder and decoder in the latent space. Moreover, one can observe that VLM+RSA(MSE) sometimes performs worse than VLM, and significantly worse than VLM+RSA in general. Specifically, it has been found that, compared to VLM and VLM+RSA respectively, VLM+RSA(MSE) significantly worsens at least two metrics in 7 and 12 (out of 20) Gym-Mujoco tasks; detailed performance over these tasks can be found in Tables 1-6 at the end of the Appendices. Such a finding backs up the design choice of using a pairwise loss for RSA instead of MSE, as MSE could be an overly strong regularizer of the LSTM recurrent states of the encoder and decoder, while the pairwise loss only enforces structural similarities. Moreover, VLBM significantly improves rank correlations and regrets compared to VLM+RSA, illustrating the importance of the branching architecture. In the paragraph below, we show empirically the benefits brought in by branching over classic ensembles.
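Before turning to that comparison, the following sketch illustrates the qualitative difference between the two alignment choices discussed above; since the exact pairwise loss is given by (7) earlier in the paper, the pairwise form below is only a generic stand-in that constrains relative structure rather than raw values:

    import torch

    def mse_alignment(h_enc, h_dec):
        # Element-wise MSE: forces decoder recurrent states to match encoder states exactly.
        return ((h_enc - h_dec) ** 2).mean()

    def pairwise_alignment(h_enc, h_dec):
        # Align pairwise differences across time steps instead of raw values, so only
        # the relative structure of the recurrent states is constrained (illustrative only).
        diff_enc = h_enc.unsqueeze(0) - h_enc.unsqueeze(1)  # (T, T, d) pairwise gaps
        diff_dec = h_dec.unsqueeze(0) - h_dec.unsqueeze(1)
        return ((diff_enc - diff_dec) ** 2).mean()

    T, d = 50, 64  # horizon and LSTM hidden size (placeholders)
    h_enc, h_dec = torch.randn(T, d), torch.randn(T, d)
    print(mse_alignment(h_enc, h_dec).item(), pairwise_alignment(h_enc, h_dec).item())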
Figure 8: t-SNE visualization of the latent space, capturing encoded state-action visitations induced by all target policies. Each point is colored by the corresponding policy from which it is generated. Policies in the legend are sorted in order of increasing performance.

Branching versus Classic Ensembles. Fig. 4 shows that VLM+RSA Ensemble does not improve performance over VLM+RSA in general, and even leads to worse overall rank correlations and regrets in the Walker2d and Hopper environments. This supports the rationale provided in Sec. 2.4 that each decoder still samples exclusively from a different latent space, and averaging over the output distributions may not help reduce the disturbance brought in by modeling artifacts under the variational inference framework, e.g.,
random weight initializations (Hanin & Rolnick, 2018; Rossi et al., 2019). In contrast, the VLBM leverages the branching architecture, allowing all branches to sample from the same latent space formulated by the encoder. Empirically, we find that the branching weights w_b in (9) allow VLBM to kill off branches that are not helpful toward reconstructing the trajectories accurately, possibly overcoming bad initializations, etc. Over all 32 tasks we consider, most VLBMs keep only 1-3 branches (out of 10), i.e., w_b < 10^-5 for all other branches. The distribution of all w_b's, from the VLBMs trained on the 32 tasks, is shown in Fig. 6; one can observe that most of the w_b's are close to zero, while the others generally fall in the ranges (0, 0.25] and [0.75, 1).

Figure 7: Correlation between the estimated (y-axis) and true returns (x-axis), across different model-based OPE methods and environments.

AR ensembles also lead to compelling rank correlations and regrets, but attain much smaller margins in MAEs over the other baselines in general; see Fig. 3. From Fig.
7, one can observe that it tends to significantly under-estimate most of the high-performing policies. Scatter plots for the other tasks can be found in Appendix A, which also show this trend. The reason could be that its model architecture and training objectives are designed to directly learn the transitions of the MDP; thus, it may produce biased predictions when the target policies lead to visitation of states that are not substantially represented in the training data, since such data are obtained using behavioral policies that are sub-optimal. In contrast, the VLBM can leverage RSA and branching against such situations, thus outperforming AR ensembles in most of the OPE tasks in terms of all metrics we considered. Interestingly, Fig. 7 also shows that latent models can sometimes over-estimate the returns. For example, in Hopper-M-E and Walker2d-M-E, VLM tends to over-estimate most policies. The VLBM performs consistently well in Hopper-M-E, but is mildly affected by such an effect in Walker2d-M-E, though over fewer policies and with smaller margins. It has been found that variational inference may fall short in approximating true distributions that are asymmetric, producing biased estimations (Yao et al., 2018). So the hypothesis would be that the dynamics used to define certain environments may lead to asymmetry in the true posterior p(z_t | z_{t-1}, a_{t-1}, s_t), which could be hard to capture with the latent modeling framework we consider. A more comprehensive understanding of such behavior can be explored in future work.
However, the VLBM still significantly outperforms VLM overall, and achieves top-performing rank correlations and regrets; such results illustrate the VLBM's improved robustness as a result of its architectural design and choices over training objectives.

t-SNE Visualization of the Latent Space. Fig. 8 illustrates a t-SNE visualization of the latent space, obtained by rolling out trajectories using all target policies respectively, and then feeding the state-action pairs into the encoder of VLBM, which maps them into the latent space. It shows that the encoded state-action pairs induced by policies of similar performance are in general swirled and clustered together, illustrating that VLBM can learn expressive and disentangled representations of its inputs.

[Figure 8 shows four panels – HalfCheetah-M-E, Hopper-M-E, Ant-M-E and Walker2d-M-E – with points colored by target policy, from lowest (0) to highest (10) performance.]
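A visualization in the spirit of Fig. 8 can be produced with scikit-learn's t-SNE once the encoder outputs are collected; the sketch below uses random placeholders for the 16-dimensional latent codes and the policy labels:

    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.manifold import TSNE

    # Placeholders: latent codes of visited state-action pairs (from the VLBM encoder)
    # and the index of the target policy that generated each pair.
    latents = np.random.randn(2000, 16)
    policy_ids = np.random.randint(0, 11, size=2000)

    embedding = TSNE(n_components=2, perplexity=30, init='pca').fit_transform(latents)
    plt.scatter(embedding[:, 0], embedding[:, 1], c=policy_ids, cmap='viridis', s=3)
    plt.colorbar(label='target policy (sorted by performance)')
    plt.show()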
4 RELATED WORK

Latent Modeling in RL. Though variational inference has rarely been explored to facilitate model-based OPE methods so far, there exist several latent models designed for RL policy optimization that are related to our work, such as SLAC (Lee et al., 2020), SOLAR (Zhang et al., 2019), LatCo (Rybkin et al., 2021), PlaNet (Hafner et al., 2019) and Dreamer (Hafner et al., 2020a;b). Below we discuss the connections and distinctions between VLBM and the latent models leveraged by these methods, with a detailed overview provided in Appendix G. Specifically, SLAC and SOLAR learn latent representations of the dynamics jointly with the optimization of the target policies, using the latent information to improve sample efficiency. Similarly, LatCo performs trajectory optimization over the latent space to allow for temporarily bypassing dynamics constraints. As a result, the latent models used in such methods are not designed toward rolling out trajectories independently, as opposed to the use of VLBM in this paper. PlaNet and Dreamer train the recurrent state-space model (RSSM) using a growing experience dataset collected by the target policy that is being concurrently updated (with exploration noise added), which requires online data collection. In contrast, under the OPE setup, VLBM is trained over a fixed set of offline trajectories collected by unknown behavioral policies.
Moreover, note that the VLM baseline is somewhat reminiscent of the RSSM and similar models as in Lee et al. (2020); Lu et al. (2022); however, the experiments above show that directly using VLM for OPE can lead to subpar performance. On the other hand, though MOPO (Yu et al., 2020), LOMPO (Rafailov et al., 2021) and COMBO (Yu et al., 2021) can learn from offline data, they focus on quantifying the uncertainty of the model's predictions of next states and rewards, and then incorporate those estimates into policy optimization objectives to penalize visits to regions where the transitions are not fully captured; thus, such works are also orthogonal to the use case of OPE.

OPE. Classic OPE methods adopt IS to estimate expectations over the unknown visitation distribution induced by the target policy, resulting in weighted IS, step-wise IS and weighted step-wise IS (Precup, 2000). IS can lead to estimates with low (or zero) bias, but with high variance (Kostrikov & Nachum, 2020; Jiang & Li, 2016), which sparks a long line of research addressing this challenge. DR methods propose to reduce variance by coupling IS with a value function approximator (Jiang & Li, 2016; Thomas & Brunskill, 2016; Farajtabar et al., 2018).
However, the introduction of such approximations may increase bias, so the method proposed in Tang et al. (2019) attempts to balance the scale of bias and variance for DR. Unlike IS and DR methods, which require the behavioral policies to be fully known, the DICE family of estimators (Zhang et al., 2020c;b; Yang et al., 2021; 2020; Nachum et al., 2019; Dai et al., 2020) and VPM (Wen et al., 2020) can be behavior-agnostic; they directly capture marginalized IS weights as the ratio between the propensity of the target policy to visit particular state-action pairs and the likelihood of those pairs appearing in the logged data. There also exist FQE methods, which extrapolate policy returns from approximated Q-functions (Hao et al., 2021; Le et al., 2019; Kostrikov & Nachum, 2020).
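As a reference point for the IS-based estimators above, a step-wise (per-decision) importance sampling estimator over logged trajectories can be sketched as follows; pi_target and pi_behavior are assumed to return action probabilities, and the weighted variant would divide by the sum of the cumulative ratios instead of the number of trajectories:

    import numpy as np

    def step_wise_is(trajectories, pi_target, pi_behavior, gamma=0.99):
        """Per-decision IS: each reward is reweighted by the cumulative likelihood
        ratio of the actions taken so far. `trajectories` is a list of (s, a, r) lists."""
        estimates = []
        for traj in trajectories:
            rho, value = 1.0, 0.0
            for t, (s, a, r) in enumerate(traj):
                rho *= pi_target(a, s) / pi_behavior(a, s)
                value += (gamma ** t) * rho * r
            estimates.append(value)
        return float(np.mean(estimates))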
Existing model-based OPE methods are designed to directly fit the MDP transitions using feed-forward (Fu et al., 2020b) or auto-regressive (Zhang et al., 2020a) models, and have shown promising results over model-free methods, as reported in a recent benchmark (Fu et al., 2020b). However, such model-based approaches can be sensitive to the initialization of weights (Hanin & Rolnick, 2018; Rossi et al., 2019) and may produce biased predictions, due to the limited coverage over the state and action space provided by offline trajectories (Fu et al., 2020b). Instead, VLBM mitigates such effects by capturing the dynamics over the latent space, such that states and rewards evolve from a compact feature space over time. Moreover, RSA and the branching can lead to increased expressiveness and robustness, such that future states and rewards are predicted accurately. There also exist OPE methods proposed toward specific applications (Chen et al., 2022; Saito et al., 2021; Gao et al., 2023; 2022b).
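Whether the dynamics are fit in the raw state space (feed-forward or AR models) or in a latent space (as in VLBM), the resulting model is typically used for OPE in the same way: roll it out under the target policy and average the returns. A minimal sketch, with assumed model.reset/model.step and policy interfaces that are not APIs from the paper or any specific library:

    import numpy as np

    def model_based_return(model, policy, horizon=1000, gamma=0.995, n_rollouts=10):
        """Estimate a target policy's return by rolling out a learned dynamics model.
        `model.reset()` -> initial state and `model.step(s, a)` -> (next_state, reward)
        are assumed interfaces."""
        returns = []
        for _ in range(n_rollouts):
            s = model.reset()
            total, discount = 0.0, 1.0
            for _ in range(horizon):
                a = policy(s)
                s, r = model.step(s, a)
                total += discount * r
                discount *= gamma
            returns.append(total)
        return float(np.mean(returns))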
5 CONCLUSION AND FUTURE WORK

We have developed the VLBM, which can accurately capture the dynamics underlying environments from offline training data that provide limited coverage of the state and action space; this is achieved by using the RSA term to smooth out the information flow from the encoders to the decoders in the latent space, as well as the branching architecture, which improves VLBM's robustness against random initializations. We have followed the evaluation guidelines provided by the DOPE benchmark, and experimental results have shown that the VLBM generally outperforms the state-of-the-art model-based OPE method using AR architectures, as well as other model-free methods. VLBM can also facilitate off-policy optimization, which can be explored in future work. Specifically, VLBM can serve as a synthetic environment on which optimal controllers (e.g., the linear-quadratic regulator) can be deployed. On the other hand, similar to Dreamer and SLAC, policies can be updated jointly with the training of VLBM, but without the need for online interactions with the environment during training.

ACKNOWLEDGMENTS

This work is sponsored in part by the AFOSR under award number FA9550-19-1-0169, as well as the NSF CNS-1652544, CNS-1837499, DUE-1726550, IIS-1651909 and DUE-2013502 awards, as well as the National AI Institute for Edge Computing Leveraging Next Generation Wireless Networks, Grant CNS-2112562.

REFERENCES

Christopher M Bishop. Pattern recognition and machine learning. Springer, 2006.

Minmin Chen, Can Xu, Vince Gatto, Devanshu Jain, Aviral Kumar, and Ed Chi. Off-policy actor-critic for recommender systems.
In Proceedings of the 16th ACM Conference on Recommender Systems, pp. 338–349, 2022.

Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International Conference on Machine Learning, pp. 1597–1607. PMLR, 2020.

Bo Dai, Ofir Nachum, Yinlam Chow, Lihong Li, Csaba Szepesvári, and Dale Schuurmans. CoinDICE: Off-policy confidence interval estimation. Advances in Neural Information Processing Systems, 33:9398–9411, 2020.

Hazel Doughty, Dima Damen, and Walterio Mayol-Cuevas. Who's better? Who's best? Pairwise deep ranking for skill determination. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6057–6066, 2018.

Mehrdad Farajtabar, Yinlam Chow, and Mohammad Ghavamzadeh. More robust doubly robust off-policy evaluation. In International Conference on Machine Learning, pp.
1447–1456. PMLR, 2018.

Justin Fu, Aviral Kumar, Ofir Nachum, George Tucker, and Sergey Levine. D4RL: Datasets for deep data-driven reinforcement learning. arXiv preprint arXiv:2004.07219, 2020a.

Justin Fu, Mohammad Norouzi, Ofir Nachum, George Tucker, Alexander Novikov, Mengjiao Yang, Michael R Zhang, Yutian Chen, Aviral Kumar, Cosmin Paduraru, et al. Benchmarks for deep off-policy evaluation. In International Conference on Learning Representations, 2020b.

Ge Gao, Qitong Gao, Xi Yang, Miroslav Pajic, and Min Chi. A reinforcement learning-informed pattern mining framework for multivariate time series classification. In International Joint Conference on Artificial Intelligence (IJCAI), 2022a.

Ge Gao, Song Ju, Markel Sanz Ausin, and Min Chi. HOPE: Human-centric off-policy evaluation for e-learning and healthcare. In International Conference on Autonomous Agents and Multiagent Systems (AAMAS), 2023.

Qitong Gao, Davood Hajinezhad, Yan Zhang, Yiannis Kantaros, and Michael M Zavlanos. Reduced variance deep reinforcement learning with temporal logic specifications.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' In Proceedings of the 10th ACM/IEEE International Conference on Cyber-Physical Systems, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 237–248, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Qitong Gao, Michael Naumann, Ilija Jovanov, Vuk Lesi, Karthik Kamaravelu, Warren M Grill, and Miroslav Pajic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Model-based design of closed loop deep brain stimulation controller using reinforcement learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' In 2020 ACM/IEEE 11th International Conference on Cyber-Physical Systems (ICCPS), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 108–118.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' IEEE, 2020a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Qitong Gao, Miroslav Pajic, and Michael M Zavlanos.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Deep imitative reinforcement learning for temporal logic robot motion planning with noisy semantic observations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' In 2020 IEEE International Conference on Robotics and Automation (ICRA), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 8490–8496.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' IEEE, 2020b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Qitong Gao, Stephen L Schmidt, Karthik Kamaravelu, Dennis A Turner, Warren M Grill, and Miroslav Pajic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Offline policy evaluation for learning-based deep brain stimulation controllers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' In 2022 ACM/IEEE 13th International Conference on Cyber-Physical Systems (ICCPS), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 80–91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' IEEE, 2022b.' 
Qitong Gao, Dong Wang, Joshua D Amason, Siyang Yuan, Chenyang Tao, Ricardo Henao, Majda Hadziahmetovic, Lawrence Carin, and Miroslav Pajic. Gradient importance learning for incomplete observations. International Conference on Learning Representations, 2022c.
Stephen Gould, Tianshi Gao, and Daphne Koller. Region-based segmentation and object detection. Advances in Neural Information Processing Systems, 22, 2009.
Danijar Hafner, Timothy Lillicrap, Ian Fischer, Ruben Villegas, David Ha, Honglak Lee, and James Davidson. Learning latent dynamics for planning from pixels. In International Conference on Machine Learning, pp. 2555–2565. PMLR, 2019.
Danijar Hafner, Timothy Lillicrap, Jimmy Ba, and Mohammad Norouzi. Dream to control: Learning behaviors by latent imagination. In International Conference on Learning Representations, 2020a.
Danijar Hafner, Timothy P Lillicrap, Mohammad Norouzi, and Jimmy Ba. Mastering atari with discrete world models. In International Conference on Learning Representations, 2020b.
Boris Hanin and David Rolnick. How to start training: The effect of initialization and architecture. Advances in Neural Information Processing Systems, 31, 2018.
Botao Hao, Xiang Ji, Yaqi Duan, Hao Lu, Csaba Szepesvari, and Mengdi Wang. Bootstrapping fitted q-evaluation for off-policy inference. In International Conference on Machine Learning, pp. 4074–4084. PMLR, 2021.
Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural Computation, 9(8):1735–1780, 1997.
Nan Jiang and Lihong Li. Doubly robust off-policy value evaluation for reinforcement learning. In International Conference on Machine Learning, pp. 652–661. PMLR, 2016.
Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013.
Ilya Kostrikov and Ofir Nachum. Statistical bootstrapping for uncertainty estimation in off-policy evaluation. arXiv preprint arXiv:2007.13609, 2020.
Hoang Le, Cameron Voloshin, and Yisong Yue. Batch policy learning under constraints. In International Conference on Machine Learning, pp. 3703–3712. PMLR, 2019.
Alex Lee, Anusha Nagabandi, Pieter Abbeel, and Sergey Levine. Stochastic latent actor-critic: Deep reinforcement learning with a latent variable model. Advances in Neural Information Processing Systems, 33, 2020.
Lihong Li, Wei Chu, John Langford, and Xuanhui Wang. Unbiased offline evaluation of contextual-bandit-based news article recommendation algorithms. In Proceedings of the Fourth ACM International Conference on Web Search and Data Mining, pp. 297–306, 2011.
Cong Lu, Philip J Ball, Tim GJ Rudner, Jack Parker-Holder, Michael A Osborne, and Yee Whye Teh. Challenges and opportunities in offline reinforcement learning from visual observations. arXiv preprint arXiv:2206.04779, 2022.
Travis Mandel, Yun-En Liu, Sergey Levine, Emma Brunskill, and Zoran Popovic. Offline policy evaluation across representations with applications to educational games. In AAMAS, volume 1077, 2014.
Rishabh Mehrotra, James McInerney, Hugues Bouchard, Mounia Lalmas, and Fernando Diaz. Towards a fair marketplace: Counterfactual evaluation of the trade-off between relevance, fairness & satisfaction in recommendation systems. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pp. 2243–2251, 2018.
Ofir Nachum, Yinlam Chow, Bo Dai, and Lihong Li. Dualdice: Behavior-agnostic estimation of discounted stationary distribution corrections. Advances in Neural Information Processing Systems, 32:2318–2328, 2019.
Doina Precup. Eligibility traces for off-policy policy evaluation. Computer Science Department Faculty Publication Series, pp. 80, 2000.
Rafael Rafailov, Tianhe Yu, Aravind Rajeswaran, and Chelsea Finn. Offline reinforcement learning from images with latent space models. In Learning for Dynamics and Control, pp. 1154–1168. PMLR, 2021.
Ignacio Rocco, Relja Arandjelović, and Josef Sivic. End-to-end weakly-supervised semantic alignment. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6917–6925, 2018.
Simone Rossi, Pietro Michiardi, and Maurizio Filippone. Good initializations of variational bayes for deep models. In International Conference on Machine Learning, pp. 5487–5497. PMLR, 2019.
Oleh Rybkin, Chuning Zhu, Anusha Nagabandi, Kostas Daniilidis, Igor Mordatch, and Sergey Levine. Model-based reinforcement learning via latent-space collocation. In International Conference on Machine Learning, pp. 9190–9201. PMLR, 2021.
Yuta Saito, Takuma Udagawa, Haruka Kiyohara, Kazuki Mogi, Yusuke Narita, and Kei Tateno. Evaluating the robustness of off-policy evaluation. In Fifteenth ACM Conference on Recommender Systems, pp. 114–123, 2021.
Yassir Saquil, Da Chen, Yuan He, Chuan Li, and Yong-Liang Yang. Multiple pairwise ranking networks for personalized video summarization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 1718–1727, 2021.
Avi Segal, Kobi Gal, Ece Kamar, Eric Horvitz, and Grant Miller. Optimizing interventions via offline policy evaluation: Studies in citizen science. In Thirty-Second AAAI Conference on Artificial Intelligence, 2018.
David Silver, Aja Huang, Chris J Maddison, Arthur Guez, Laurent Sifre, George Van Den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda Panneershelvam, Marc Lanctot, et al. Mastering the game of go with deep neural networks and tree search. Nature, 529(7587):484–489, 2016.
Shengpu Tang and Jenna Wiens. Model selection for offline reinforcement learning: Practical considerations for healthcare settings. In Machine Learning for Healthcare Conference, pp. 2–35. PMLR, 2021.
Ziyang Tang, Yihao Feng, Lihong Li, Dengyong Zhou, and Qiang Liu. Doubly robust bias reduction in infinite horizon off-policy estimation. In International Conference on Learning Representations, 2019.
Philip Thomas and Emma Brunskill. Data-efficient off-policy policy evaluation for reinforcement learning. In International Conference on Machine Learning, pp. 2139–2148. PMLR, 2016.
Oriol Vinyals, Igor Babuschkin, Wojciech M Czarnecki, Michaël Mathieu, Andrew Dudzik, Junyoung Chung, David H Choi, Richard Powell, Timo Ewalds, Petko Georgiev, et al. Grandmaster level in starcraft ii using multi-agent reinforcement learning. Nature, 575(7782):350–354, 2019.
Xinlong Wang, Rufeng Zhang, Chunhua Shen, Tao Kong, and Lei Li. Dense contrastive learning for self-supervised visual pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3024–3033, 2021.
Junfeng Wen, Bo Dai, Lihong Li, and Dale Schuurmans. Batch stationary distribution estimation. In International Conference on Machine Learning, pp. 10203–10213. PMLR, 2020.
Mengjiao Yang, Ofir Nachum, Bo Dai, Lihong Li, and Dale Schuurmans. Off-policy evaluation via the regularized lagrangian. Advances in Neural Information Processing Systems, 33:6551–6561, 2020.
Mengjiao Yang, Bo Dai, Ofir Nachum, George Tucker, and Dale Schuurmans. Offline policy selection under uncertainty. In Deep RL Workshop NeurIPS 2021, 2021.
Yuling Yao, Aki Vehtari, Daniel Simpson, and Andrew Gelman. Yes, but did it work?: Evaluating variational inference. In International Conference on Machine Learning, pp. 5581–5590. PMLR, 2018.
Tianhe Yu, Garrett Thomas, Lantao Yu, Stefano Ermon, James Y Zou, Sergey Levine, Chelsea Finn, and Tengyu Ma. Mopo: Model-based offline policy optimization. Advances in Neural Information Processing Systems, 33:14129–14142, 2020.
Tianhe Yu, Aviral Kumar, Rafael Rafailov, Aravind Rajeswaran, Sergey Levine, and Chelsea Finn. Combo: Conservative offline model-based policy optimization. Advances in Neural Information Processing Systems, 34:28954–28967, 2021.
Marvin Zhang, Sharad Vikram, Laura Smith, Pieter Abbeel, Matthew Johnson, and Sergey Levine. Solar: Deep structured representations for model-based reinforcement learning. In International Conference on Machine Learning, pp. 7444–7453. PMLR, 2019.
Michael R Zhang, Thomas Paine, Ofir Nachum, Cosmin Paduraru, George Tucker, Mohammad Norouzi, et al. Autoregressive dynamics models for offline policy evaluation and optimization. In International Conference on Learning Representations, 2020a.
Ruiyi Zhang, Bo Dai, Lihong Li, and Dale Schuurmans. Gendice: Generalized offline estimation of stationary values. In International Conference on Learning Representations, 2020b.
Shangtong Zhang, Bo Liu, and Shimon Whiteson. Gradientdice: Rethinking generalized offline estimation of stationary values. In International Conference on Machine Learning, pp. 11194–11203. PMLR, 2020c.

Figure 9: The Gym-Mujoco and Adroit environments considered by the D4RL branch of DOPE. (Panels: Ant, Halfcheetah, Hopper, Walker2d, Pen, Door, Hammer, Relocate.)

A ADDITIONAL EXPERIMENTAL DETAILS AND RESULTS

Additional Results and Discussions. Rank correlations, regret@1 and MAEs for all 32 tasks are documented in Tables 1–6 below; some VPM entries are absent since they were not reported in Fu et al. (2020b), nor is their code open-sourced. The mean and standard deviation (in subscripts) over 3 random seeds are reported.
Note that in each column, the performance of multiple methods may be highlighted in bold, meaning that they all achieve the best performance and do not significantly outperform each other. The fact that VLBM outperforms the ablation baselines in most cases suggests that the RSA loss term and the branching architecture can effectively increase model expressiveness and allow the dynamics underlying the MDP to be learned more accurately and robustly from offline data that provide limited exploration coverage. Yet, smaller margins are attained between VLBM and VLM+RSA in Hopper-M-E and Hopper-M. This is likely because Hopper has a relatively lower-dimensional state space compared to the other three environments, so its underlying dynamics can already be sufficiently captured by VLM+RSA. Figures 10 and 11 show the correlation between estimated (y-axis) and true returns (x-axis) for all the OPE tasks we consider. It can be found that for Halfcheetah-R, -M-R, and -M, most of the model-based methods cannot significantly distinguish the returns across target policies. The cause could be that the offline trajectories provided for this task are relatively more challenging than those of the other OPE tasks. Such an effect appears to impact IS, VPM, DICE, DR, and FQE to a larger extent: it can be observed from the scatter plots reported in the DOPE benchmark (Fu et al., 2020b) that these methods could hardly tell the scale of returns across different target policies, as the dots almost form a horizontal line in each plot. However, the estimated returns from VLBM and IS still preserve the rank, which leads to high rank correlations and low regrets.
Implementation Details and Hyper-parameters. The model-based methods are evaluated by directly interacting with each target policy for 50 episodes, and the mean of discounted total returns (γ = 0.995) over all episodes is used as the estimated performance of the policy. We choose the neural network architectures as follows. The components involving LSTMs, which include q_ψ(z_t | z_{t-1}, a_{t-1}, s_t) and p_φ(z_t | z_{t-1}, a_{t-1}), consist of one LSTM layer with 64 nodes, followed by a dense layer with 64 nodes. All other components do not involve LSTM layers, so each is constituted by a neural network with 2 dense layers, with 128 and 64 nodes respectively. The output layers that determine the mean and diagonal covariance of diagonal Gaussian distributions use linear and softplus activations, respectively. The ones that determine the mean of Bernoulli distributions (e.g., for capturing early termination of episodes) use sigmoid activations.
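To make the architecture description above concrete, the following is a minimal PyTorch sketch of one LSTM-based component (e.g., the encoder q_ψ(z_t | z_{t-1}, a_{t-1}, s_t)) with a diagonal-Gaussian head (linear mean, softplus variance) and a Bernoulli termination head (sigmoid). Layer sizes follow the description above; all class and argument names, as well as any activation choice not stated in the text (e.g., the ReLU after the dense layer), are illustrative assumptions rather than the authors' implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class RecurrentGaussianComponent(nn.Module):
    """Sketch of an LSTM-based component: one LSTM layer (64 units) followed by a
    dense layer (64 units), with a diagonal-Gaussian head for z_t and a Bernoulli
    head for early termination. Names and shapes are illustrative only."""
    def __init__(self, input_dim, latent_dim, hidden_dim=64):
        super().__init__()
        # input_dim would be, e.g., dim(z) + dim(a) + dim(s) for [z_{t-1}, a_{t-1}, s_t]
        self.lstm = nn.LSTM(input_dim, hidden_dim, batch_first=True)
        self.dense = nn.Linear(hidden_dim, hidden_dim)
        self.mean_head = nn.Linear(hidden_dim, latent_dim)   # linear activation for the mean
        self.var_head = nn.Linear(hidden_dim, latent_dim)    # softplus -> positive diagonal variance
        self.done_head = nn.Linear(hidden_dim, 1)            # sigmoid -> Bernoulli mean

    def forward(self, x, hidden=None):
        # x: (batch, seq_len, input_dim)
        h, hidden = self.lstm(x, hidden)
        h = torch.relu(self.dense(h))                        # ReLU is an assumption
        mean = self.mean_head(h)
        var = F.softplus(self.var_head(h))
        done_prob = torch.sigmoid(self.done_head(h))
        return mean, var, done_prob, hidden
```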
VLBM and the two ablation baselines, VLM and VLM+RSA, are trained using the offline trajectories provided by DOPE, with max_iter in Alg. 1 set to 1,000 and the minibatch size set to 64. The Adam optimizer is used to perform gradient descent. To determine the learning rate, we perform grid search among {0.003, 0.001, 0.0007, 0.0005, 0.0003, 0.0001, 0.00005}. Exponential decay is applied to the learning rate, which decays it by a factor of 0.997 every iteration. To train VLBM, we set the constants from equation 10 following C1 = C2, and perform grid search among {5, 1, 0.1, 0.05, 0.01, 0.005, 0.001, 0.0001}. To train VLM+RSA, the constant C from equation 8 is determined by grid search among the same set of parameters above. L2-regularization with a decay of 0.001 and batch normalization are applied to all hidden layers.
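For reference, a minimal sketch of the optimization setup described above (Adam, 1,000 iterations, minibatch size 64, exponential learning-rate decay of 0.997 per iteration) could look as follows; the model, loss, and minibatch sampling are placeholders and do not stand for the actual VLBM objective.

```python
import torch

model = torch.nn.Linear(8, 8)  # placeholder standing in for the VLBM networks
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)  # lr chosen by grid search
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.997)

max_iter, batch_size = 1000, 64
for it in range(max_iter):
    # minibatch = sample_minibatch(offline_trajectories, batch_size)  # assumed helper
    loss = model(torch.randn(batch_size, 8)).pow(2).mean()  # stands in for the VLBM loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()  # multiplies the learning rate by 0.997 each iteration
```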
Note that some of the environments (e.g., Ant, Hopper, Walker2d, Pen) may terminate an episode before timeout if the state meets specific conditions; details on how VLBM captures such early-termination behavior are introduced in Appendix D.

The DOPE Benchmark. The deep OPE (DOPE) benchmark (Fu et al., 2020b) provides a standardized training and evaluation procedure for OPE works to follow, which facilitates fair and comprehensive comparisons among various OPE methods. Specifically, it utilizes existing environments and training trajectories provided by D4RL (https://github.com/rail-berkeley/d4rl) and RLUnplugged (https://github.com/deepmind/deepmind-research/tree/master/rl_unplugged), which are two benchmark suites for offline RL training, and additionally provides target policies for OPE methods to evaluate. In the D4RL branch, the training trajectories are originally collected from various sources including random exploration, human teleoperation, and RL-trained policies with limited exploration; thus, they can provide varied levels of coverage over the state-action space. Moreover, the target policies are trained using online RL algorithms, which can in general lead to different state-action visitations than those in the training trajectories. We leverage the D4RL branch as our test base, since the OPE tasks it provides are considered challenging, i.e., there is limited coverage in the training data as well as discrepancy between the behavioral and target policies. Graphical illustrations of the Gym-Mujoco and Adroit environments considered are shown in Fig. 9.
Details on the environments and datasets used are shown in Tables 7 and 8, from the perspectives of state and action dimensions, whether episodes can be terminated before timeout, whether controls are performed over a continuous space, and the size of the offline trajectories used for training. In contrast, in the RLUnplugged branch, the training trajectories are always collected using online RL training, which can result in adequate coverage over the state-action space. The target policies are trained by applying offline RL over the training trajectories, so that the behavioral and target policies can lead to similar state-action visitation distributions. As discussed in DOPE (Fu et al., 2020b), such tasks are suitable for studies where ideal data are needed, such as complexity comparisons.

Evaluation Metrics. Following (Fu et al., 2020b), we consider rank correlation, regret@1 and mean absolute error (MAE) as the evaluation metrics. Specifically, rank correlation measures the strength and direction of the monotonic association between the rank of OPE-estimated returns and that of the true returns over all target policies; it is captured by Spearman's correlation coefficient between the ordinal rankings of estimated and true returns. Regret@1 is captured by the difference between the true return of the policy with the highest OPE-estimated return and the return of the policy that actually produces the highest true return. In other words, regret@1 evaluates how much worse the policy with the highest OPE-estimated return would perform than the actual best policy. These two metrics evaluate how useful OPE would be for important applications such as policy selection. Finally, we also consider MAE, which is commonly used in estimation/regression tasks. Mathematical definitions of these metrics can be found in (Fu et al., 2020b).
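As a concrete illustration of the three metrics, the sketch below computes them from arrays of OPE-estimated and true per-policy returns (the estimate could, e.g., be the mean discounted return over 50 model rollouts per target policy, as described above). The function and variable names are ours; only SciPy's spearmanr is used externally.

```python
import numpy as np
from scipy.stats import spearmanr

def ope_metrics(estimated_returns, true_returns):
    """Rank correlation, regret@1 and MAE between OPE estimates and true returns.
    Both inputs are 1-D arrays with one entry per target policy."""
    est = np.asarray(estimated_returns, dtype=float)
    true = np.asarray(true_returns, dtype=float)

    rank_corr, _ = spearmanr(est, true)           # Spearman's rho over the two rankings
    picked = int(np.argmax(est))                  # policy ranked best by the OPE method
    regret_at_1 = float(np.max(true) - true[picked])
    mae = float(np.mean(np.abs(est - true)))
    return rank_corr, regret_at_1, mae

# Example with 5 hypothetical target policies:
print(ope_metrics([10., 40., 25., 90., 60.], [12., 35., 30., 80., 70.]))
```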
Implementation of AR Ensembles. For fair comparisons with VLBM, in the experiments we train an ensemble of the state-of-the-art model-based OPE method, auto-regressive (AR) models (Zhang et al., 2020a), as one of the baselines. Specifically, we train an ensemble of 10 AR models to learn p(s_{t+1}, r_t | s_t, a_t) in an auto-regressive manner, with each individual model following the design introduced in (Zhang et al., 2020a), i.e.,

s_{t+1}^{(j)} \sim p(s_{t+1}^{(j)} | s_t, a_t, s_{t+1}^{(1)}, \ldots, s_{t+1}^{(j-1)}),   (11)

where s_{t+1}^{(j)} represents the element located at the j-th dimension of the state variable, and D the dimension of the state space. The reward is treated as an additional dimension of the state, i.e., r_t \sim p(r_t | s_t, a_t, s_{t+1}^{(1)}, \ldots, s_{t+1}^{(D)}).
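To illustrate the auto-regressive factorization in equation 11, here is a small NumPy sketch that samples the next state one dimension at a time, conditioning each dimension on the state, the action, and the dimensions already sampled, and then samples the reward conditioned on all D state dimensions. conditional_gaussian is a hypothetical stand-in for a single AR model's per-dimension head, with toy parameters for illustration only.

```python
import numpy as np

def conditional_gaussian(s_t, a_t, sampled_dims):
    """Hypothetical per-dimension head returning (mean, std) of
    p(s^{(j)}_{t+1} | s_t, a_t, s^{(1)}_{t+1}, ..., s^{(j-1)}_{t+1})."""
    ctx = np.concatenate([s_t, a_t, sampled_dims])
    return ctx.sum() * 0.01, 1.0  # toy parameters, not a learned model

def sample_next_state(s_t, a_t, state_dim, rng):
    """Sample s_{t+1} one dimension at a time, following equation 11."""
    sampled = []
    for j in range(state_dim):
        mean, std = conditional_gaussian(s_t, a_t, np.array(sampled))
        sampled.append(rng.normal(mean, std))
    # the reward is treated as one extra dimension, conditioned on all D state dims
    r_mean, r_std = conditional_gaussian(s_t, a_t, np.array(sampled))
    reward = rng.normal(r_mean, r_std)
    return np.array(sampled), reward

rng = np.random.default_rng(0)
s_next, r = sample_next_state(np.zeros(11), np.zeros(3), state_dim=11, rng=rng)
```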
However, the original literature (Zhang et al., 2020a) does not detail which specific ensemble architecture is used (e.g., overall averaging or weighted averaging). As a result, we choose the same weighted averaging procedure as used in VLBM branching, to factor out the influence of different ensemble architectures and facilitate fair comparisons. Specifically, a total of 10 AR models, parameterized by {θ_1, ..., θ_10}, along with 10 weight variables \{w_1^\theta, \ldots, w_{10}^\theta \mid \sum_i w_i^\theta = 1\}, are trained. Similar to the weighted averaging architecture used in VLBM, i.e., equation 9, the mean and variance of the prediction s_{t+1}^{(j)}, captured by the normal distribution N(µ, σ^2), follow

\mu = \sum_{i=1}^{10} w_i^\theta \cdot \mu_{\theta_i}(s_{t+1}^{(j)}), \quad \sigma^2 = \sum_{i=1}^{10} (w_i^\theta)^2 \cdot \sigma_{\theta_i}^2(s_{t+1}^{(j)}),   (12)

where \mu_{\theta_i}(s_{t+1}^{(j)}) and \sigma_{\theta_i}^2(s_{t+1}^{(j)}) are the mean and variance produced by each individual AR model in the ensemble.
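A minimal NumPy sketch of the weighted averaging in equation 12 is given below; the per-model means and variances are mixed with weights summing to one. Producing the weights via a softmax over trainable logits is our assumption; the text only states the constraint that the weights sum to 1.

```python
import numpy as np

def ensemble_gaussian(means, variances, weight_logits):
    """Combine 10 per-model Gaussian heads into one prediction per equation 12.
    means, variances: arrays of shape (10,) for a single state dimension;
    weight_logits: unnormalized weights, mapped to the simplex via softmax (assumed)."""
    w = np.exp(weight_logits - weight_logits.max())
    w /= w.sum()                        # enforces sum_i w_i = 1
    mu = np.sum(w * means)              # mu = sum_i w_i * mu_i
    var = np.sum(w**2 * variances)      # sigma^2 = sum_i w_i^2 * sigma_i^2
    return mu, var

mu, var = ensemble_gaussian(np.linspace(0.0, 1.0, 10),
                            np.full(10, 0.04),
                            np.zeros(10))
```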
Training Resources. Training of the proposed method and the baselines is facilitated by Nvidia Quadro RTX 6000, NVIDIA RTX A5000, and NVIDIA TITAN XP GPUs.

License. The use of DOPE⁹ and D4RL (Fu et al., 2020a) follows the Apache License 2.0.

9 https://github.com/google-research/deep_ope

Figure 10: Scatter plots between OPE-estimated (y-axis) and true (x-axis) returns over all 20 Gym-Mujoco tasks that are considered. Part 1.

Figure 11: Scatter plots between OPE-estimated (y-axis) and true (x-axis) returns over all 20 Gym-Mujoco tasks that are considered. Part 2.

B MORE t-SNE VISUALIZATIONS

Figure 12: t-SNE visualization over the latent space captured by VLM, illustrating encoded state-action visitations induced from all target policies. Each point is colored by the corresponding policy from which it is generated. Policies in the legend are sorted in the order of increasing performance.

Figure 13: t-SNE visualization over the latent space captured by VLM+RSA(MSE), illustrating encoded state-action visitations induced from all target policies. Each point is colored by the corresponding policy from which it is generated. Policies in the legend are sorted in the order of increasing performance.
Figures 12 and 13 above visualize the latent space captured by two ablation baselines, VLM and VLM+RSA(MSE), respectively. It can be observed that the latent space captured by VLM is not as well disentangled as that of VLBM (shown in Figure 8), as the state-action pairs induced by policies with different levels of performance generally cluster together without explicit boundaries. This finding empirically illustrates the importance of the RSA loss (7), as it can effectively regularize p_ψ(z_t | z_{t-1}, a_{t-1}, s_t) and allows the encoder to map the MDP states to an expressive and compact latent space from which the decoder can reconstruct states and rewards accurately. Moreover, Figure 13 shows that the latent representations of the state-action pairs captured by VLM+RSA(MSE) are distributed almost uniformly over the latent space. This justifies the rationale provided in Sec. 2.3 that MSE is too strong a constraint to regularize the hidden states of the encoder and decoder, and is also consistent with the results reported in Figure 3, where VLM+RSA(MSE) performs worse than VLM in general.
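Visualizations of this kind can be reproduced, at a high level, by projecting the encoded latent states with t-SNE and coloring each point by the target policy that generated the underlying trajectory. The sketch below assumes that per-policy latent vectors have already been obtained from the encoder; it is only an illustration, not the authors' plotting code.

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

def plot_latent_tsne(latents_per_policy, perplexity=30, seed=0):
    """latents_per_policy: list (sorted by increasing policy performance) of
    arrays of shape (n_i, latent_dim) holding encoded state-action visitations."""
    X = np.concatenate(latents_per_policy, axis=0)
    labels = np.concatenate(
        [np.full(len(z), i) for i, z in enumerate(latents_per_policy)])
    emb = TSNE(n_components=2, perplexity=perplexity, random_state=seed).fit_transform(X)
    sc = plt.scatter(emb[:, 0], emb[:, 1], c=labels, cmap="viridis", s=4)
    plt.colorbar(sc, label="policy index (low to high performance)")
    plt.title("t-SNE of encoded state-action visitations")
    plt.show()
```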
C ALGORITHMS FOR TRAINING AND EVALUATING VLBM

Algorithm 1: Train VLBM.
Input: Model weights ψ, φ_1, ..., φ_B, w_1, ..., w_B, offline trajectories ρ_β, and learning rate α.
Begin:
1: Initialize ψ, φ_1, ..., φ_B, w_1, ..., w_B
2: for iter in 1 : max_iter do
3:   Sample a trajectory [(s_0, a_0, r_0, s_1), ..., (s_{T-1}, a_{T-1}, r_{T-1}, s_T)] ∼ ρ_β
4:   z^ψ_0 ∼ q_ψ(z_0 | s_0)
5:   z^{φ_b}_0 ∼ p(z_0), for all b ∈ [1, B]
6:   Run forward pass of VLBM following (3), (5) and (9) for t = 1 : T, and collect all variables needed to evaluate L_VLBM as specified in (10).
7:   ψ ← ψ + α∇_ψ L_VLBM
8:   for b in 1 : B do
9:     φ_b ← φ_b + α∇_{φ_b} L_VLBM
10:    w_b ← w_b + α∇_{w_b} L_VLBM
11:  end for
12: end for
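A minimal PyTorch-style sketch of the update loop in Algorithm 1 is given below. It assumes a hypothetical `vlbm` module exposing the encoder parameters (ψ), the branch decoders (φ_1, ..., φ_B), the branch weights (w_1, ..., w_B), and an `objective` method that runs the forward pass and returns L_VLBM for one trajectory; these names are illustrative, not part of the released implementation.

```python
import torch

def train_vlbm(vlbm, offline_trajectories, max_iter, lr):
    """Sketch of Algorithm 1: ascend the VLBM objective L_VLBM over the
    encoder, branch-decoder, and branch-weight parameters."""
    params = list(vlbm.encoder.parameters())          # psi
    for decoder in vlbm.branch_decoders:              # phi_1, ..., phi_B
        params += list(decoder.parameters())
    params += [vlbm.branch_weights]                   # w_1, ..., w_B
    optimizer = torch.optim.Adam(params, lr=lr)

    for _ in range(max_iter):
        traj = offline_trajectories.sample()          # one trajectory from rho_beta
        # Forward pass (Eqs. 3, 5, 9) and objective (Eq. 10) are assumed to be
        # implemented inside vlbm.objective; maximizing L_VLBM = minimizing -L_VLBM.
        loss = -vlbm.objective(traj)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
```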
Algorithm 2: Evaluate VLBM.
Input: Trained model weights ψ, φ_1, ..., φ_B, w_1, ..., w_B
Begin:
1: Initialize the list that stores the accumulated returns over all episodes, R = []
2: for epi in 1 : max_epi do
3:   Initialize the variable r = 0 that tracks the accumulated return for the current episode
4:   Initialize latent states from the prior, i.e., z^{φ_b}_0 ∼ p(z_0) for all b ∈ [1, B]
5:   Initialize LSTM hidden states h^{φ_b}_0 = 0 for all b ∈ [1, B]
6:   Sample s^{φ_b}_0 ∼ p_φ(s_0 | z^{φ_b}_0) for all b ∈ [1, B] and generate the initial MDP state s^φ_0 following (9)
7:   for t in 1 : T do
8:     Determine the action following the target policy π, i.e., a_{t-1} ∼ π(a_{t-1} | s^φ_{t-1})
9:     for b in 1 : B do
10:      Update h^{φ_b}_t, h̃^{φ_b}_t, z^{φ_b}_t, s^{φ_b}_t, r^{φ_b}_{t-1} following (5).
11:    end for
12:    Generate the next state s^φ_t following (9), as well as the reward
       r^φ_{t-1} ∼ p_φ(r_{t-1} | z^{φ_1}_t, ..., z^{φ_B}_t) = N( µ = Σ_b w_b · µ(r^{φ_b}_{t-1}),  Σ_diag = Σ_b w_b² · Σ_diag(r^{φ_b}_{t-1}) )
13:    Update r ← r + γ^{t-1} r^φ_{t-1}, with γ being the discounting factor
14:  end for
15:  Append r into R
16: end for
17: Average over all elements in R, which serves as the estimated return over π
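The Monte Carlo return estimate in Algorithm 2 can be sketched as follows. The `init_branches`, `branch_step`, `fuse_states`, and `fuse_rewards` helpers are hypothetical stand-ins for equations (5) and (9); the sketch only shows the overall rollout-and-average structure, not the authors' implementation.

```python
import torch

@torch.no_grad()
def evaluate_policy(vlbm, policy, max_epi, horizon, gamma):
    """Sketch of Algorithm 2: roll the target policy inside the learned VLBM
    and average the discounted returns across episodes."""
    returns = []
    for _ in range(max_epi):
        zs, hs = vlbm.init_branches()          # z_0^{phi_b} ~ p(z_0), h_0^{phi_b} = 0
        s = vlbm.fuse_states(zs)               # initial fused state via Eq. (9)
        ret = 0.0
        for t in range(1, horizon + 1):
            a = policy(s)                      # a_{t-1} ~ pi(. | s^{phi}_{t-1})
            zs, hs, branch_out = vlbm.branch_step(zs, hs, a)   # per-branch update, Eq. (5)
            s = vlbm.fuse_states(zs)           # weighted-average next state, Eq. (9)
            r = vlbm.fuse_rewards(branch_out)  # weighted-average reward head
            ret += gamma ** (t - 1) * r.item()
        returns.append(ret)
    return sum(returns) / len(returns)         # OPE estimate of the return under pi
```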
D EARLY TERMINATION OF ENVIRONMENTS

Some Gym-Mujoco environments, including Ant, Hopper, Walker2d and Pen, may terminate an episode before reaching the maximum number of steps if the state violates specific constraints. Below we introduce how VLM and VLBM can be enriched to capture such early termination behaviors.

VLM. For VLM, we introduce an additional component d^φ_t ∼ p_φ(d_t | z^φ_t) to the generative process in equation 5, where d^φ_t is a Bernoulli variable determining if an episode should be terminated at its t-th step. Specifically, p_φ(d_t | z^φ_t) follows a Bernoulli distribution, with mean determined by an MLP with sigmoid activation applied to the output layer. As a result, the generative process now follows

    h^φ_t = f_φ(h^φ_{t-1}, z^φ_{t-1}, a_{t-1}),  h̃^φ_t = g_φ(h^φ_t),  z^φ_t ∼ p_φ(h̃^φ_t),  s^φ_t ∼ p_φ(s_t | z^φ_t),
    r^φ_{t-1} ∼ p_φ(r_{t-1} | z^φ_t),  d^φ_t ∼ p_φ(d_t | z^φ_t),  a_t ∼ π(a_t | s^φ_t).   (13)

Moreover, we add a new term to VLM's training objective, in order to update the component introduced above during training, i.e.,

    L^{early_term}_VLM(ψ, φ) = L_VLM(ψ, φ) + Σ^T_{t=0} log p_φ(d_t | z_t),   (14)

with L_VLM(ψ, φ) being the original objective of VLM, as presented in equation 8.

VLBM. For VLBM, the termination of an episode is determined following

    d^φ_t ∼ p_φ(d_t | z^{φ_1}_t, ..., z^{φ_B}_t) = Bernoulli( µ = Σ_b w_b · µ_d(d^{φ_b}_t) ),   (15)

where µ_d(d^{φ_b}_t) = φ^{MLP}_{b,µ_d}(z^{φ_b}_t) is the mean of d^{φ_b}_t produced from the b-th branch of the decoder, and φ^{MLP}_{b,µ_d} is the corresponding MLP that maps z^{φ_b}_t to µ_d(d^{φ_b}_t). To update the components involved in the procedure above, we introduce a new term to the VLBM objective, i.e.,

    L^{early_term}_VLBM(ψ, φ_1, ..., φ_B, w_1, ..., w_B)   (16)
      = L_VLBM(ψ, φ_1, ..., φ_B, w_1, ..., w_B) + Σ^T_{t=0} log p_φ(d^φ_t | z^{φ_1}_t, ..., z^{φ_B}_t),   (17)

with L_VLBM being the original objective of VLBM, as presented in equation 10.
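A small sketch of the Bernoulli termination head described above is given below, assuming B branch latent states and learned branch weights that sum to one. Layer sizes and names are illustrative assumptions rather than the paper's exact architecture.

```python
import torch
import torch.nn as nn

class TerminationHead(nn.Module):
    """Per-branch MLP with sigmoid output giving mu_d(d_t^{phi_b}); the branch
    means are combined with weights w_b into one Bernoulli, as in Eq. (15)."""

    def __init__(self, latent_dim: int, num_branches: int, hidden: int = 128):
        super().__init__()
        self.heads = nn.ModuleList([
            nn.Sequential(nn.Linear(latent_dim, hidden), nn.ReLU(),
                          nn.Linear(hidden, 1), nn.Sigmoid())
            for _ in range(num_branches)
        ])

    def forward(self, branch_latents, branch_weights):
        # branch_latents: list of B tensors z_t^{phi_b}; branch_weights: (B,) summing to 1.
        mus = torch.stack(
            [head(z).squeeze(-1) for head, z in zip(self.heads, branch_latents)])
        mu = (branch_weights.unsqueeze(-1) * mus).sum(dim=0)   # weighted termination prob.
        dist = torch.distributions.Bernoulli(probs=mu.clamp(1e-6, 1 - 1e-6))
        d_t = dist.sample()                                    # 1 => terminate episode
        log_prob = dist.log_prob(d_t)                          # term added to the objective
        return d_t, log_prob
```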
E BOUND DERIVATION

We now derive the evidence lower bound (ELBO) for the joint log-likelihood, i.e.,

    log p_φ(s_{0:T}, r_{0:T-1})   (18)
     = log ∫_{z_{1:T} ∈ Z} p_φ(s_{0:T}, z_{1:T}, r_{0:T-1}) dz   (19)
     = log ∫_{z_{1:T} ∈ Z} p_φ(s_{0:T}, z_{1:T}, r_{0:T-1}) · q_ψ(z_{0:T} | s_{0:T}, a_{0:T-1}) / q_ψ(z_{0:T} | s_{0:T}, a_{0:T-1}) dz   (20)
     ≥ E_{q_ψ}[ log p(z_0) + log p_φ(s_{0:T}, z_{1:T}, r_{0:T-1} | z_0) − log q_ψ(z_{0:T} | s_{0:T}, a_{0:T-1}) ]   (21)
     = E_{q_ψ}[ log p(z_0) + log p_φ(s_0 | z_0) + Σ^T_{t=1} log p_φ(s_t, z_t, r_{t-1} | z_{t-1}, a_{t-1}) − log q_ψ(z_0 | s_0) − Σ^T_{t=1} log q_ψ(z_t | z_{t-1}, a_{t-1}, s_t) ]   (22)
     = E_{q_ψ}[ log p(z_0) − log q_ψ(z_0 | s_0) + log p_φ(s_0 | z_0) + Σ^T_{t=1} log( p_φ(s_t | z_t) p_φ(r_{t-1} | z_t) p_φ(z_t | z_{t-1}, a_{t-1}) ) − Σ^T_{t=1} log q_ψ(z_t | z_{t-1}, a_{t-1}, s_t) ]   (23)
     = E_{q_ψ}[ Σ^T_{t=0} log p_φ(s_t | z_t) + Σ^T_{t=1} log p_φ(r_{t-1} | z_t) − KL( q_ψ(z_0 | s_0) || p(z_0) ) − Σ^T_{t=1} KL( q_ψ(z_t | z_{t-1}, a_{t-1}, s_t) || p_φ(z_t | z_{t-1}, a_{t-1}) ) ].   (24)

Note that the transition from equation 20 to equation 21 follows Jensen's inequality.

F BASICS OF VARIATIONAL INFERENCE

Classic variational auto-encoders (VAEs) are designed to generate synthetic data that share similar characteristics to the data used for training (Kingma & Welling, 2013). Specifically, VAEs learn an approximated posterior q_ψ(z|x) and a generative model p_φ(x|z), over the prior p(z), with x being the data and z the latent variable. Its true posterior p_φ(z|x) is intractable, i.e.,
    p_φ(z|x) = p_φ(x|z) p(z) / p_φ(x);   (25)

since the marginal likelihood in the denominator, p_φ(x) = ∫_z p_φ(x|z) p(z) dz, requires integration over the unknown latent space. For the same reason, VAEs cannot be trained to directly maximize the marginal log-likelihood, max log p_φ(x). To resolve this, one could maximize a lower bound of p_φ(x), i.e.,

    max_{ψ,φ}  −KL( q_ψ(z|x) || p(z) ) + E_{q_ψ}[ log p_φ(x|z) ],   (26)

which is the evidence lower bound (ELBO).

Reparameterization. During training, it is required to sample from q_ψ(z|x) and p_φ(x|z) constantly. The reparameterization technique is introduced in (Kingma & Welling, 2013) to ensure that gradients can flow through such sampling processes during back-propagation. For example, if both distributions q_ψ(z|x) and p_φ(x|z) follow diagonal Gaussians, with mean and diagonal covariance determined by MLPs, i.e.,

    z ∼ q_ψ(z|x) = N( µ = ψ^{MLP}_µ(x), Σ = ψ^{MLP}_Σ(x) ),   (27)
    x ∼ p_φ(x|z) = N( µ = φ^{MLP}_µ(z), Σ = φ^{MLP}_Σ(z) );   (28)

here, ψ^{MLP}_µ, ψ^{MLP}_Σ, φ^{MLP}_µ, φ^{MLP}_Σ are the MLPs that generate the means and covariances. The sampling processes above can be captured by reparameterization, i.e.,

    z = ψ^{MLP}_µ(x) + ψ^{MLP}_Σ(x) · ε,   (29)
    x = φ^{MLP}_µ(z) + φ^{MLP}_Σ(z) · ε,   (30)

with ε ∼ N(0, I). Consequently, the gradients over ψ and φ can be calculated following the chain rule and used for back-propagation during training. We direct readers to (Kingma & Welling, 2013) for a comprehensive review of reparameterization.
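As a concrete illustration of equations (27) and (29), the snippet below implements reparameterized sampling z = µ + σ·ε for a diagonal-Gaussian encoder, together with the closed-form KL term of the ELBO (equation 26) against a standard-normal prior. The two-layer MLP and its sizes are illustrative assumptions, not taken from the paper.

```python
import torch
import torch.nn as nn

class GaussianEncoder(nn.Module):
    """q_psi(z|x) as a diagonal Gaussian whose mean and (log-)std come from MLPs."""

    def __init__(self, x_dim: int, z_dim: int, hidden: int = 128):
        super().__init__()
        self.backbone = nn.Sequential(nn.Linear(x_dim, hidden), nn.ReLU())
        self.mu_head = nn.Linear(hidden, z_dim)         # plays the role of psi^MLP_mu
        self.log_sigma_head = nn.Linear(hidden, z_dim)  # parameterizes psi^MLP_Sigma

    def forward(self, x):
        h = self.backbone(x)
        mu, log_sigma = self.mu_head(h), self.log_sigma_head(h)
        eps = torch.randn_like(mu)            # eps ~ N(0, I)
        z = mu + log_sigma.exp() * eps        # reparameterized sample, Eq. (29)
        return z, mu, log_sigma

def kl_to_standard_normal(mu, log_sigma):
    """KL( N(mu, sigma^2) || N(0, I) ) in closed form, as used in the ELBO (Eq. 26)."""
    return 0.5 * (mu.pow(2) + (2 * log_sigma).exp() - 1.0 - 2 * log_sigma).sum(dim=-1)
```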
G ADDITIONAL RELATED WORKS

Overview of latent-model based RL methods. In SLAC, latent representations are used to improve the sample efficiency of model-free RL training algorithms, by jointly modeling and learning dynamics and controls over the latent space. Similarly, SOLAR improves data efficiency for multi-task RL by first learning high-level latent representations of the environment, which can be shared across different tasks; then, local dynamics models are inferred from the abstraction, with controls solved by linear-quadratic regulators. PlaNet and Dreamer further improve the architecture and training objectives of latent models, allowing them to look ahead multiple steps and plan over longer horizons. There also exists LatCo, which directly performs trajectory optimization over the latent space, allowing the agent to temporarily bypass dynamical constraints and quickly navigate to high-reward regions in the early training stage. To summarize, the methods above leverage latent representations to gain sufficient exploration coverage and quickly navigate to high-reward regions, improving sample efficiency for policy optimization. Note that they mostly require online interactions with the environment to populate a growing experience replay buffer for policy learning, which is a different goal from OPE, where learning must be performed over a fixed set of offline trajectories.

Rank Corr.       | Ant E    | Ant M-E  | Ant M    | Ant M-R  | Ant R
IS               | .14±.41  | −.21±.35 | −.17±.32 | .07±.39  | .26±.34
VPM              | −.42±.38 | −.28±.28 | −.20±.31 | −.26±.29 | .24±.31
DICE             | −.13±.37 | −.33±.40 | −.36±.28 | −.24±.39 | −.21±.35
DR               | −.28±.32 | .35±.35  | .66±.26  | .45±.32  | .01±.33
FQE              | −.13±.32 | .37±.35  | .65±.25  | .57±.28  | .04±.33
AR Ensemble      | .40±.12  | .44±.25  | .56±.01  | .54±.16  | .48±.17
VLM              | .28±.14  | .39±.16  | .37±.03  | .37±.19  | .36±.07
VLM+RSA (MSE)    | .33±.11  | .29±.13  | .35±.22  | .30±.42  | .17±.14
VLM+RSA          | .40±.03  | .53±.19  | .42±.12  | .53±.19  | .40±.11
VLM+RSA Ens.     | .62±.16  | .76±.02  | .65±.07  | .62±.13  | .0±.60
VLBM             | .79±.01  | .81±.05  | .65±.06  | .59±.14  | .78±.24

Rank Corr.       | Halfcheetah E | Halfcheetah M-E | Halfcheetah M | Halfcheetah M-R | Halfcheetah R
IS               | .01±.35  | −.06±.37 | .80±.11  | .59±.26  | −.24±.36
VPM              | .18±.35  | −.47±.29 | −.07±.36 | .27±.36  |
DICE             | −.44±.30 | −.08±.35 | −.26±.07 | −.15±.41 | −.70±.22
DR               | .77±.17  | .62±.27  | .32±.32  | .32±.37  | −.02±.38
FQE              | .78±.15  | .62±.27  | .34±.17  | .26±.37  | −.11±.41
AR Ensemble      | .65±.11  | .65±.07  | .60±.09  | .59±.14  | .60±.06
VLM              | .75±.19  | .45±.06  | .33±.10  | .64±.06  | .43±.09
VLM+RSA (MSE)    | .54±.31  | .49±.03  | .60±.08  | .47±.11  | .13±.27
VLM+RSA          | .80±.17  | .54±.08  | .65±.21  | .61±.03  | .51±.08
VLM+RSA Ens.     | .71±.14  | .66±.08  | .64±.02  | .60±.05  | .45±.17
VLBM             | .88±.01  | .74±.13  | .81±.13  | .64±.04  | .60±.06

Rank Corr.       | Walker2d E | Walker2d M-E | Walker2d M | Walker2d M-R | Walker2d R
IS               | .22±.37  | .24±.33  | −.25±.35 | .65±.24  | −.05±.38
VPM              | .17±.32  | .49±.37  | .44±.21  | −.52±.25 | −.42±.34
DICE             | −.37±.27 | −.34±.34 | .12±.38  | .55±.23  | −.19±.36
DR               |
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='37 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29 FQE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31 AR Ensemble .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='54.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='55.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29 VLM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='44.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 VLM+RSA (MSE) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19 VLM+RSA .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='56.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='59.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29 VLM+RSA Ens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='62.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 VLBM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='66.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='60.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 Rank Corr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Hopper E Hopper M-E Hopper M Hopper M-R Hopper R IS .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='26 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='26 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34 VPM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='37 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 DICE −.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39 DR −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='41.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36 FQE −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='01.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='45.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36 AR Ensemble .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='53.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 VLM −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='21 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 VLM+RSA (MSE) −.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='24 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='51.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='58.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 VLM+RSA .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='51.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='53.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19 VLM+RSA Ens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='21 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12 −.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='01.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='3 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='66.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='63.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 VLBM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='75.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 Table 1: Rank correlation between estimated and ground-truth returns for all Gym-Mujoco tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Results are obtained by averaging over 3 random seeds used for training, with standard deviations shown in subscripts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 25 Published as a conference paper at ICLR 2023 Rank Corr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Door human Door cloned Door expert Pen human Pen cloned Pen expert IS −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='35 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='66.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='76.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='71.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='45.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31 VPM −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='65.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 DICE −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='26 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='53.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30 DR .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='01.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='60.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='76.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='52.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28 FQE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='21 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='42 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='01.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 AR Ens.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='58.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='52.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='61.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='60.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 VLBM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='80.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='82.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='58.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 Rank Corr.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Hammer human Hammer cloned Hammer expert Relocate human Relocate cloned Relocate expert IS .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='58.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='24 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='52.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23 VPM −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31 DICE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='38 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31 −.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34 DR −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='49.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='65.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='40.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='24 FQE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='62.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17 −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28 AR Ens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='44.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='40.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='53.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='54.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23 VLBM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='58.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='68.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='80.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='58.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17 Table 2: Rank correlation between estimated and ground-truth returns for all Adroit tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Results are obtained by averaging over 3 random seeds used for training, with standard deviations shown in subscripts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 26 Published as a conference paper at ICLR 2023 Regret@1 Ant E Ant M-E Ant M Ant M-R Ant R IS .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='47.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='61.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='56.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22 VPM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='24 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='21 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='24 DICE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='62.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='60.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29 DR .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 FQE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 AR Ensemble .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='050 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 VLM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='24 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 VLM+RSA (MSE) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='050.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='21 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='4 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='48.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='44 VLM+RSA .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='24 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 VLM+RSA Ens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='050.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='52.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='37 VLBM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='050.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='050.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='050.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Regret@1 Halfcheetah E Halfcheetah M-E Halfcheetah M Halfcheetah M-R Halfcheetah R IS .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='73.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='42 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 VPM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='80.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 DICE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='40 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='37 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='82.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='81.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30 DR .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 FQE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 AR Ensemble .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05 VLM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='01 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='230.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' VLM+RSA (MSE) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05 VLM+RSA .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='230.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' VLM+RSA Ens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='69.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='44 VLBM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 Regret@1 Walker2d E Walker2d M-E Walker2d M Walker2d M-R Walker2d R IS .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='26 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 VPM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='42 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 DICE .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33 DR .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='68.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 FQE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='21 AR Ensemble .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 VLM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='51.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='38 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 VLM+RSA (MSE) .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='49.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='43.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='35 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='860.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='29 VLM+RSA .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='37 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 VLM+RSA Ens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='860.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='58.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 VLBM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='01 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06 Regret@1 Hopper E Hopper M-E Hopper M Hopper M-R Hopper R IS .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='880.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05 VPM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10 DICE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='19 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 DR .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='35 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='24 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='41.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='17 FQE .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='41.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='20 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='32 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22 AR Ensemble .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 VLM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='76.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='35.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='22 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='07.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 VLM+RSA (MSE) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='34 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='510.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 VLM+RSA .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='62.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='38 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='23 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='15 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='39 VLM+RSA Ens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='18 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='510.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='47.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='36 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='02 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='06.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 VLBM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='040.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='03.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='04 Table 3: Regret@1 for all Gym-Mujoco tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Results are obtained by averaging over 3 random seeds used for training, with standard deviations shown in subscripts.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='27 Published as a conference paper at ICLR 2023 [Chunked numeric cells of Table 4: Regret@1 for all Adroit tasks (Door, Pen, Hammer, Relocate; human, cloned, expert datasets; methods: IS, VPM, DICE, DR, FQE, AR Ens., VLBM); the individual values are not reliably recoverable from this dump. Results are obtained by averaging over 3 random seeds used for training, with standard deviations shown in subscripts.]'
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='28 Published as a conference paper at ICLR 2023 [Chunked numeric cells of the MAE tables for Ant, Halfcheetah, Walker2d and Hopper (dataset variants E, M-E, M, M-R, R; methods: IS, VPM, DICE, DR, FQE, AR Ensemble, VLM, VLM+RSA (MSE), VLM+RSA, VLM+RSA Ens., VLBM); the per-cell values are not reliably recoverable from this dump.]' metadata={'source':
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='14015 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11728 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='11716 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='41220 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='VLM+RSA Ens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 25323 14942 23362 11517 30647 VLBM 2668 1404 12647 12421 38527 Table 5: MAE between estimated and ground-truth returns for all Gym-Mujoco tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Results are obtained by averaging over 3 random seeds used for training, with standard deviations shown in subscripts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 29 Published as a conference paper at ICLR 2023 MAE Door human Door cloned Door expert Pen human Pen cloned Pen expert IS 870173 891188 648122 3926128 1707128 4547222 VPM 862163 1040188 879182 1569215 2324129 2325136 DICE 1108199 69779 856134 4193244 1454219 2963279 DR 37965 42473 1353218 2846200 132398 2013564 FQE 38960 43881 134384 2872170 1232105 1057281 AR Ens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='7343 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='8267 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='223616 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='216112 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='1981106 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='1803226 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='VLBM ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='710152 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='9331 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='60084 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='1637286 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='669270 ' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='1002262 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='MAE ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='Hammer ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='human ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='Hammer ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='cloned ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='Hammer ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='expert ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='Relocate ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='human ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='Relocate ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='cloned ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='Relocate ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='expert ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='IS ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='73521118 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='74031126 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='3052608 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='638217 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='632215 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='2731147 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='VPM ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='71051107 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='74591114 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='73121117 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='806166 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='586135 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='620214 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='DICE ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='5677936 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='4169839 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='3963758 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='4526474 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='1347485 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='1095221 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='DR ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='5768751 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='6101679 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='3485590 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='606116 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='412124 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='1193350 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='FQE ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='6000612 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='5415558 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='2950728 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='593113 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='439125 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='1351393 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='AR Ens.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 689727 724012 30578 8237 6626 21384 VLBM 6184479 7267402 2682146 62425 388183 2021270 Table 6: MAE between estimated and ground-truth returns for all Adroit tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Results are obtained by averaging over 3 random seeds used for training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 30 Published as a conference paper at ICLR 2023 State Dim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Action Dim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Early Term.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Continuous Ctrl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Dataset Dataset Size Ant 27 8 Yes Yes random 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='427 medium- replay 301,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='698 medium 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='175 medium- expert 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='998,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='158 expert 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='036 Halfcheetah 17 6 No Yes random 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='000 medium- replay 201,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='798 medium 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='000 medium- expert 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='998,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='000 expert 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='000 Hopper 11 3 Yes Yes random 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='999 medium- replay 401,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='598 medium 999,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='998 medium- expert 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='998,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='966 expert 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='061 Walker2d 17 6 Yes Yes random 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='999 medium- replay 301,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='698 medium 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='322 medium- expert 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='998,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='318 expert 999,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content='000 Table 7: Summary of the Gym-Mujoco environments and datasets used to train VLBM and baselines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' State Dim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Action Dim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Early Term.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Continuous Ctrl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' Dataset Dataset Size Pen 45 24 Yes Yes human 4,975 cloned 496,264 expert 494,248 Door 39 28 No Yes human 6,704 cloned 995,642 expert 995,000 Hammer 46 26 No Yes human 11,285 cloned 996,394 expert 995,000 Relocate 39 30 No Yes human 9,917 cloned 996,242 expert 995,000 Table 8: Summary of the Adroit environments and datasets used to train VLBM and baselines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} +page_content=' 31' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZtFLT4oBgHgl3EQfWS9S/content/2301.12056v1.pdf'} diff --git a/_9AzT4oBgHgl3EQfTPsM/content/tmp_files/2301.01244v1.pdf.txt b/_9AzT4oBgHgl3EQfTPsM/content/tmp_files/2301.01244v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..9b5690fa56f5fcfc3f33beef8dadf560c5b2aada --- /dev/null +++ b/_9AzT4oBgHgl3EQfTPsM/content/tmp_files/2301.01244v1.pdf.txt @@ -0,0 +1,546 @@ +Thermalization of the Ablowitz-Ladik lattice in the +presence of non-integrable perturbations +MAHMOUD A. SELIM1, GEORGIOS G. PYRIALAKOS2, FAN O. WU2, ZIAD +MUSSLIMANI3, KONSTANTINOS G. 
MAKRIS4,5, MERCEDEH KHAJAVIKHAN1 AND +DEMETRIOS CHRISTODOULIDES1,* +1 Ming Hsieh Department of Electrical and Computer Engineering, University of Southern California, Los Angeles, California 90089, USA +2 CREOL, College of Optics and Photonics, University of Central Florida, Orlando, Florida 32816-2700, USA +3 Department of Mathematics, Florida State University, Tallahassee, FL 32306-4510, USA +4 Institute of Electronic Structure and Laser, Foundation for Research and Technology-Hellas (FORTH), P.O. Box 1527, 71110 Heraklion, Greece +5 ITCP, Department of Physics, University of Crete, 70013 Heraklion, Greece +*Corresponding author: demetri@creol.ucf.edu +Received XX Month XXXX; revised XX Month, XXXX; accepted XX Month XXXX; posted XX Month XXXX (Doc. ID XXXXX); published XX Month XXXX +We investigate the statistical mechanics of the photonic Ablowitz-Ladik lattice, the integrable version of the discrete +nonlinear Schrödinger equation. In this regard, we demonstrate that in the presence of perturbations the complex +response of this system can be accurately captured within the framework of optical thermodynamics. Along these lines, +we shed light on the true relevance of chaos in the thermalization of the Ablowitz-Ladik system. Our results indicate that +when linear and nonlinear perturbations are incorporated, this weakly nonlinear lattice will thermalize into a proper +Rayleigh-Jeans distribution with a well-defined temperature and chemical potential. This result illustrates that in the +supermode basis, a non-local and non-Hermitian nonlinearity can in fact properly thermalize this periodic array in the +presence of two quasi-conserved quantities. + +In recent years, there has been a resurgence of interest in +investigating optical wave propagation phenomena in nonlinear +photonic lattices. In general, such configurations can display rich +light dynamics that have no counterpart in continuous settings. In +this respect, optical waveguide arrays provide a fertile ground +where several intriguing processes can be observed, ranging from +discrete solitons [1–4] and Bloch oscillations [5,6], to dynamic +localization processes [7–9] and Rabi oscillations [10]. Under tight- +binding conditions, light evolution in these optical configurations +can be accurately described by the discrete nonlinear Schrödinger +equation (DNLS) [1-4]. Relevant to this topic, is the fully integrable +version of DNLS [11], the so-called Ablowitz-Ladik (AL) model, that +is known to possess an inverse scattering transform [11,12]. The +integrability of this system allows one to exploit some of its +hallmark features in studying, for instance, non-resonant +reflectionless potentials [13], Bose-Einstein condensates [14], +discrete vortices [15], dark soliton collisions [16], and optical rogue +waves [17]. Moreover, the unidirectional flow of discrete solitons in +a photonic lattice has been demonstrated by employing the AL +potential [18]. In higher dimensions, the AL equation exhibits exotic +solutions and complex waveforms, for instance, line solitons and X- +shaped vortices [15]. Yet, at this point, little is known if any as to the +nonlinear response of perturbed AL lattices, especially in systems +where the number of modes is very large. In general, this represents +a challenging problem given that the integrability of the AL model +can be broken in the presence of perturbations in which case the +field evolution in such a lattice becomes utterly complex and chaotic. 
+Quite recently, a self-consistent thermodynamic formalism has +been put forward in an effort to predict in a statistical manner the +response of nonlinear highly multimoded photonic systems [19– +27]. This approach is universal: it can be utilized in both discrete and +continuous settings in the presence of any arbitrary nonlinearities +as long as more than two invariants are manifested [19]. One of the +basic tenets of statistical mechanics is that the system under study +is ergodic, i.e., it should be able to fully explore its phase space in a +fair manner. In nonlinear multimode systems, this property +naturally follows because of chaos. At this juncture, the following +question naturally arises: can the AL lattice thermalize, and if so, +under which conditions? It is worth emphasizing that a periodic AL +lattice with 𝑀 sites is fully integrable given that it exhibits 𝑀 +conservation laws. As such, it does not display chaos and thus the +corresponding asymptotic Lyapunov exponents are zero [28]. +Finally, in the context of the AL lattice, one may also ask whether it +is indeed conceivable to identify an appropriate basis where +thermalization can be explored [28]. +To address this question, here we investigate the thermalization +dynamics of the weakly nonlinear Ablowitz-Ladik system in both its +integrable form as well as in the presence of non-integrable +perturbations. In the latter case, we show that the thermalization of +this lattice can be captured within the framework of optical +thermodynamics [19] when the system is investigated within its +linear supermode basis. When linear on-site perturbations are + + + +introduced (akin to those associated with Anderson’s localization +[29–31]), we show that complete thermalization into a proper +Rayleigh-Jeans distribution is eventually attained at thermal +equilibrium. This result illustrates that a non-local and non- +Hermitian nonlinearity can in fact properly thermalize an optical +lattice in the presence of two quasi-conserved quantities. Similar +results are also obtained when non-integrable nonlinearities are +involved. +In general, nonlinear wave propagation in a perturbed periodic +AL system consisting of 𝑀 waveguide sites (Fig. 1) is governed by +the following evolution equation [11]: + +𝑖 𝑑𝑎𝑛 +𝑑𝑧 + (𝑎n+1 + 𝑎n−1) + |𝑎𝑛|2(𝑎n+1 + 𝑎n−1) ++𝛾|𝑎𝑛|2𝑎𝑛 + 𝛿𝑓𝑛𝑎𝑛 = 0, +(1) + +where 𝑎𝑛 stands for the local optical field amplitude at +waveguide site 𝑛, while 𝛾 and 𝛿 are scaling parameters for the Kerr +nonlinearity and refractive index perturbations, respectively. In the +limit where 𝛾 and 𝛿 are zero, Eq. (1) is reduced to the fully +integrable AL equation that is known to display 𝑀 conservation +laws. In Eq. (1), 𝑓𝑛 represents an on-site random normal +perturbation in the corresponding channel’s propagation constant +that is obtained from a Gaussian distribution with zero mean and a +root-mean-square width 𝜎 = 1. Given that the perturbation terms +in this equation break the lattice symmetries and hence its +integrability, one will expect that some of the invariants will vanish. +Nonetheless, these perturbations conserve the time and phase shift +symmetry, indicating that at least two invariant quantities shall +persist. As we will see, in this case, the optical power (norm) and +internal energy will still remain ‘quasi-invariant’. 
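To make Eq. (1) concrete, the following minimal Python sketch integrates the perturbed lattice with a standard fourth-order Runge-Kutta scheme and monitors the total power during propagation. This is not the authors' code: periodic boundary conditions, the propagation distance, the step size, and the values of gamma and delta are illustrative assumptions, while the on-site disorder f_n is drawn from a zero-mean normal distribution of unit width as described above.

import numpy as np

def al_rhs(a, f, gamma, delta):
    # Eq. (1) solved for the derivative:
    # da_n/dz = i[(1 + |a_n|^2)(a_{n+1} + a_{n-1}) + gamma*|a_n|^2*a_n + delta*f_n*a_n]
    nb = np.roll(a, -1) + np.roll(a, 1)            # nearest neighbours, periodic lattice assumed
    return 1j * ((1.0 + np.abs(a)**2) * nb + gamma * np.abs(a)**2 * a + delta * f * a)

def propagate(a0, f, z_max, dz, gamma, delta):
    a = a0.astype(complex)
    for _ in range(int(round(z_max / dz))):        # classical RK4 in the propagation variable z
        k1 = al_rhs(a, f, gamma, delta)
        k2 = al_rhs(a + 0.5 * dz * k1, f, gamma, delta)
        k3 = al_rhs(a + 0.5 * dz * k2, f, gamma, delta)
        k4 = al_rhs(a + dz * k3, f, gamma, delta)
        a = a + (dz / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)
    return a

rng = np.random.default_rng(1)
M = 100
a0 = np.sqrt(10.0 / M) * np.exp(2j * np.pi * rng.random(M))   # weak excitation with total power P = 10
f = rng.normal(0.0, 1.0, M)                                   # on-site disorder, sigma = 1
aL = propagate(a0, f, z_max=50.0, dz=1e-3, gamma=0.05, delta=0.1)
print(np.sum(np.abs(a0)**2), np.sum(np.abs(aL)**2))           # the optical power remains quasi-conserved

In the weakly nonlinear regime used here (|a_n|^2 on the order of 0.1), the printed input and output powers agree closely, consistent with the quasi-invariance discussed above.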
These two quantities are associated with two conservation laws of the integrable AL model, which are given by [32]

K^{(1)} = \sum_{n=1}^{M} \ln(1 + |a_n|^2) ,   (2a)

K^{(2)} = - \sum_{n=1}^{M} ( a_n a_{n+1}^{*} + a_n^{*} a_{n+1} ) .   (2b)

Fig. 2. Nonlinear dynamics in unperturbed and perturbed AL lattices when P = 10, U = 10.77 and M = 100. In all cases, the initial power distribution at the input is shaded in blue, while the output modal occupancies are depicted by red dots. The black curves represent the RJ distribution anticipated if thermalization were to occur. The right panels show the evolution of the modal occupancies with distance. In (a) and (b) the unperturbed AL system fails to thermalize, while in (c)-(f) it settles into a RJ distribution once a small disorder ((c)-(d)) or a Kerr nonlinearity ((e)-(f)) is introduced, respectively. Panels (g) and (h) indicate that U and P remain quasi-invariant during propagation.

In what follows, we will investigate the thermalization dynamics of perturbed AL arrangements under weak nonlinear conditions. Note that in the weakly nonlinear regime, the K^{(1)} invariant is approximately equal to the total power P flowing in the lattice, since \ln(1 + x) \approx x when |a_n|^2 \ll 1. Therefore,

P = \sum_{n=1}^{M} |a_n|^2 \approx K^{(1)} .   (3)

Of importance will be to express these two constants of motion in the supermode basis of this periodic array. It is worth

Fig. 1. A periodic Ablowitz-Ladik lattice consisting of M coupled waveguide channels.

[Figure panels (a) and (b); vertical axis label: S-Entropy.]

1, Alpine refines the active partition of each partitioned variable. Suppose Alpine's partition \hat{P}_i^{k-1} = [\hat{p}^i_0, \hat{p}^i_1, \ldots, \hat{p}^i_{2k-2}, \hat{p}^i_{2k-1}] for variable x_i at iteration k-1, where 0 =: \hat{p}^i_0 \le \hat{p}^i_1 \le \cdots \le \hat{p}^i_{2k-2} \le \hat{p}^i_{2k-1} := 1. Let \bar{x}^{k-1} denote the x-component of a solution to the piecewise McCormick relaxation-based lower bounding problem (PMR) at iteration k-1 with the above variable partitions. We say that the jth partition [\hat{p}^i_{j-1}, \hat{p}^i_j] of variable x_i is active at iteration k-1 (relative to the solution \bar{x}^{k-1}) if \hat{p}^i_{j-1} \le \bar{x}^{k-1}_i \le \hat{p}^i_j, or (equivalently) if there exists an optimal solution to (PMR) such that y^i_j = 1. Let A(i, k-1) denote the index of an active partition of variable x_i at iteration k-1. At iteration k, Alpine refines the active partition [\hat{p}^i_{A(i,k-1)-1}, \hat{p}^i_{A(i,k-1)}] of each partitioned variable x_i, i \in [n], around the lower bounding solution \bar{x}^{k-1}_i as follows:

\hat{P}^k_i := \Big[ \hat{p}^i_0, \ldots, \hat{p}^i_{A(i,k-1)-1}, \; \max\Big\{ \hat{p}^i_{A(i,k-1)-1}, \; \bar{x}^{k-1}_i - \frac{\mathrm{width}(A(i,k-1))}{\Delta} \Big\}, \; \min\Big\{ \hat{p}^i_{A(i,k-1)}, \; \bar{x}^{k-1}_i + \frac{\mathrm{width}(A(i,k-1))}{\Delta} \Big\}, \; \hat{p}^i_{A(i,k-1)}, \ldots, \hat{p}^i_{2k-1} \Big],

where width(A(i, k-1)) := \hat{p}^i_{A(i,k-1)} - \hat{p}^i_{A(i,k-1)-1} is the width of the active partition of x_i at iteration k-1. The above setting for the partition \hat{P}^k_i can be viewed as a generalization of the setting for the partition \hat{P}^1_i at Alpine's first iteration. A motivation for adding partitioning points around the solution \bar{x}^{k-1} stems from the fact that the piecewise McCormick relaxations need to be refined around this (infeasible) solution in order to exclude it from the feasible region of (PMR) at iteration k. This heuristic partitioning strategy was chosen because it empirically performs well on numerous test instances, particularly for instances of the pooling problem [53].
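A small Python sketch of this refinement rule, written for a single partitioned variable, may help fix ideas: the function below inserts up to two new points around the lower bounding solution inside its active partition, clipped so that they never leave that partition. The partition, the solution value, and Delta = 10 (the scaling factor used later in the experiments) are illustrative choices, and the handling of clipped duplicates is a simplification rather than Alpine's exact implementation.

def refine_partition(points, x_bar, delta=10.0):
    # points: current sorted partition of one variable (including its bounds); x_bar: lower-bounding solution
    # Find the active partition [points[A-1], points[A]] that contains x_bar.
    A = next(j for j in range(1, len(points)) if points[j - 1] <= x_bar <= points[j])
    lo, hi = points[A - 1], points[A]
    radius = (hi - lo) / delta                           # active-partition width divided by Delta
    new_pts = [max(lo, x_bar - radius), min(hi, x_bar + radius)]
    return sorted(set(points[:A] + new_pts + points[A:]))  # clipped duplicates collapse automatically

# Example: partition [0, 0.3, 0.7, 1] with lower-bounding solution 0.5 and Delta = 10
print(refine_partition([0.0, 0.3, 0.7, 1.0], 0.5))        # new points near 0.46 and 0.54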
Alpine continues to refine its variable partitions until its upper and lower bounds on +the optimal objective value of (QCQP) converge to within a specified tolerance. +4 +Strong partitioning for nonconvex QCQPs +The choice of partitioning points in the initial iterations can greatly impact Alpine’s lower bounds, number of +iterations for convergence, and overall solution time. While there are some motivations for Alpine’s default +partitioning strategy, it is still ad hoc for a few reasons: it uses the same parameter ∆ to partition the +domains of all variables, and it only considers symmetric partitions around the reference point ˆx. +The +quality of Alpine’s initial partitions also depend on the quality of the feasible solution determined during +presolve, with sub-optimal or infeasible presolve solutions potentially leading to sub-optimal initial partitions +and slow convergence overall. Hence, we propose strong partitioning (SP) to address the above limitations +of Alpine’s partitioning strategy. +The concept of strong partitioning is akin to strong branching in B&B algorithms for MILPs. Strong +branching for MILPs only chooses the branching variable (a discrete choice) at a node to maximize some +function of the lower bound improvements at its two children nodes. Strong partitioning, on the other hand, +chooses partitioning points for each partitioned variable (continuous choices within the variable domains) +such that the resulting piecewise McCormick relaxation lower bound is maximized. It can be formulated as +the following max-min problem: +P ∗ ∈ arg max +P ∈P +v(P), +(SP) +where v(P) is the value function of (PMR-OA) and the set P is defined as +P := +� +P ∈ [0, 1]d×n : 0 ≤ pi +1 ≤ pi +2 ≤ · · · ≤ pi +d ≤ 1, ∀i ∈ [n] +� +. +The strong partitioning problem (SP) is challenging to solve even to local optimality because the inner- +minimization problem (PMR-OA) includes binary decisions and its feasible region depends on P (variables +of the outer-maximization). +While (SP) can be formulated as a generalized semi-infinite program [64], +state-of-the-art global optimization algorithms for this problem class do not scale even for moderate prob- +lem dimensions. Therefore, we design a local optimization method for (SP) with the hope of determining +partitioning points ¯P ∈ P that yield a tight lower bound v( ¯P). +We use generalized gradients of the value function of the inner-minimization (PMR-OA) within a bundle +solver for nonsmooth nonconvex optimization to solve problem (SP) to local optimality. Although the value +function of an MILP might be discontinuous in general, (PMR-OA) possesses special structure because (outer- +approximations of) piecewise McCormick relaxations are nonconvex piecewise-linear continuous functions (cf. +Figure 1), which allows for the computation of sensitivity information in this setting. The bundle solver, +MPBNGC [48], that we use requires function and generalized gradient evaluations at points P ∈ P during the +course of its algorithm. Each function evaluation v(P) requires the solution of the MILP (PMR-OA). Under +suitable assumptions, a generalized gradient ∂P v(P) can be obtained by fixing y to an optimal y solution +of (PMR-OA) and computing a generalized gradient of the resulting LP (8) using parametric sensitivity +theory [21]. We formalize these details in the next section. Before we proceed, we include the convergence +guarantees of MPBNGC [48] below for the sake of completeness. +Definition 1. Let Z ⊂ RN be open. 
A locally Lipschitz function f : Z \to R is said to be weakly semismooth if the directional derivative f'(z, d) = \lim_{t \downarrow 0} \frac{f(z + td) - f(z)}{t} exists for all z \in Z, d \in R^N, and f'(z, d) = \lim_{t \downarrow 0} \xi(z + td)^T d for \xi(z + td) \in \partial f(z + td).

Definition 2. Let f : R^N \to R, g : R^N \to R^M be locally Lipschitz continuous. Consider the problem \min_{z : g(z) \le 0} f(z). A feasible point z^* is said to be substationary if there exist multipliers \lambda \ge 0 and \mu \in R^M_+, with (\lambda, \mu) \ne (0, 0), such that

0 \in \lambda \partial f(z^*) + \sum_{j=1}^{M} \mu_j \partial g_j(z^*),   \mu_j g_j(z^*) = 0, \forall j \in [M].

Theorem 1. Suppose the function v is weakly semismooth. Then MPBNGC either terminates finitely with a substationary point to (SP), or any accumulation point of a sequence of MPBNGC solutions is a substationary point to (SP).

Proof. See Theorem 9 of [48].

The following example shows that the value function v may be nonsmooth.

Example 1. Consider the following instance of the QCQP (1):

\min_{x \in [0,1]} x  s.t.  x^2 \ge (0.4)^2.

Clearly, the optimal solution is x^* = 0.4 with optimal value v^* = 0.4. Suppose we wish to partition the domain of x into two sub-intervals (d = 1). Let P = [0, p, 1] denote the partition of x with 0 \le p \le 1. After some algebraic manipulation, the outer-approximation problem (PMR-OA) can be reformulated as

v(p) = \min_{x \in [0,1]} x  s.t.  w \ge (0.4)^2,  w \le \max\{px, (1 + p)x - p\},  w \ge 2\alpha_j x - \alpha_j^2, \forall j \in J,

where \{\alpha^k_j\}_{j \in J} \subset [0, 1] and we write v(p) to indicate the dependence on the partitioning point p. We can derive the piecewise McCormick lower bound to be

v(p) = \frac{0.16 + p}{1 + p}  if 0 \le p \le 0.4,  and  v(p) = \frac{0.16}{p}  if 0.4 < p \le 1,

which shows that v is continuous and piecewise differentiable, with a nondifferentiable kink at p = 0.4, where the bound attains its maximum value of 0.4.

4.1 Computing generalized gradients of v

We begin with the following useful result. Note that the assumption that the y solution of (7) is unique can be verified by adding a "no-good cut" and re-solving (7) to check if the second-best solution for y has a strictly greater objective than v(P).

Lemma 2. Suppose problem (7) has a unique y solution y^* \in Y at P \in P and v(\cdot, y^*) is continuous at P. Then v(\tilde{P}) = v(\tilde{P}, y^*), \forall \tilde{P} \in P in a neighborhood of P.

Proof. Because y^* is the unique y solution to (7) at P \in P, v(P, y^*) < v(P, y), \forall y \in Y \setminus \{y^*\}. To see v(\cdot) \equiv v(\cdot, y^*) in a neighborhood of P, we show that the value function v(\cdot, y) is lower semicontinuous on P for each y \in Y. The stated result then holds since v(\cdot, y^*) is assumed to be continuous at P. The set-valued mapping P \in P \mapsto \{z \ge 0 : M(P, y)z = \bar{B}y + \bar{b}\} is locally compact for each y \in Y by virtue of the continuity of the mapping M(\cdot, y) and the finite bounds on all of the variables in problem (PMR-OA). Lemma 5.3 of Still [61] then implies that v(\cdot, y) is lower semicontinuous on P for each y \in Y.

The next result characterizes the gradient of v in the non-degenerate case.

Theorem 3. Suppose P \in P and problem (7) has a unique y solution y^* \in Y. Consider the LP (8) with y fixed to y^*. If this LP has a unique primal solution z^* and a unique dual solution \pi^*, then

\frac{\partial v}{\partial p^i_j}(P) = \frac{\partial v}{\partial p^i_j}(P, y^*) = \sum_{k=1}^{n_r} \sum_{l=1}^{n_c} -\pi^*_k z^*_l \frac{\partial M_{kl}}{\partial p^i_j}(P, y^*),   \forall i \in [n], j \in [d].

Proof. Lemma 2 implies v(\cdot) \equiv v(\cdot, y^*) in a neighborhood of P provided v(\cdot, y^*) is continuous at P. Theorem 1 of Freund [25] (cf. Proposition 4.1 of [21]) and the fact that the function M(\cdot, y^*) is continuously differentiable on P together imply v(\cdot, y^*) is continuously differentiable at P and the stated equalities hold.
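Looping back to Example 1 for a moment, the short Python sketch below reproduces the piecewise McCormick bound v(p) numerically: for each of the two sub-intervals it solves the tiny LP induced by the secant over-estimator of x^2 and keeps the better of the two pieces. It is only an illustration of the value function's shape, not an implementation of (PMR-OA); in particular, the tangent under-estimator cuts are dropped here on the assumption (easy to verify for this instance) that they do not change the bound. The sketch recovers the closed-form expression above, including the kink at p = 0.4 that motivates the use of generalized gradients.

import numpy as np
from scipy.optimize import linprog

def piece_bound(l, u):
    # min x over [l, u] s.t. w >= 0.16 and w <= (l + u) x - l u  (secant over-estimator of x^2 on [l, u])
    c = [1.0, 0.0]                                   # decision variables (x, w)
    A_ub = [[0.0, -1.0],                             # -w <= -0.16
            [-(l + u), 1.0]]                         #  w - (l + u) x <= -l u
    b_ub = [-0.16, -l * u]
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=[(l, u), (None, None)])
    return res.fun if res.success else np.inf        # +inf if this piece is infeasible

def v(p):
    # piecewise McCormick lower bound with partition [0, p, 1]
    return min(piece_bound(0.0, p), piece_bound(p, 1.0))

for p in [0.2, 0.39, 0.40, 0.41, 0.8]:
    print(p, round(v(p), 4))
# Matches (0.16 + p)/(1 + p) for p <= 0.4 and 0.16/p for p > 0.4; the maximum lower bound, 0.4,
# is attained at the kink p = 0.4, which equals the optimal value of the original one-dimensional QCQP.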
+Next, we derive a formula for the generalized gradient ∂P v(P) when the assumption that the LP (8) is +non-degenerate fails to hold. +Theorem 4. Suppose P ∈ P and problem (7) has a unique y solution y∗ ∈ Y . Consider the LP (8) with y +fixed to y∗. Suppose v(·, y∗) is finite and locally Lipschitz in a neighborhood of P. Then +∂P v(P) = ∂P v(P, y∗) = conv +� nr +� +k=1 +nc +� +l=1 +−π∗ +kz∗ +l +∂Mkl +∂P (P, y∗) : (z∗, π∗) is a primal-dual optimal pair for (8) +� +. +Proof. Lemma 2 implies v(·) ≡ v(·, y∗) in a neighborhood of P. The stated equalities hold by mirroring +the proof of Theorem 5.1 of De Wolf and Smeers [21] and noting that the function M(·, y∗) is continuously +differentiable on P. +De Wolf and Smeers [21] (see Assumption 5.1) and Im [32] argue that the following assumption ensures +v(·, y∗) is locally Lipschitz in a neighborhood of P ∈ P. +Lemma 5. Suppose P ∈ P and ¯y ∈ Y . Consider the LP (8) with y fixed to ¯y. If the matrix M(P, ¯y) has full +row rank and ¯B¯y + ¯b ∈ int +� +{M(P, ¯y)z : z ≥ 0} +� +, then v(·, ¯y) is finite and locally Lipschitz in a neighborhood +of P. +Proof. See Proposition 5.3 of [21] and pages 73 to 76 of [32]. +We now verify that the full rank assumption in Lemma 5 holds in general. +Lemma 6. The matrix M(P, y) has full row rank, ∀P ∈ int(P) and y ∈ Y . +Proof. Fix y ∈ Y . Since P ∈ int(P), we have 0 < pi +1 < pi +2 < · · · < pi +d < 1 for each i ∈ [n]. We show that +for each (i, j) ∈ B and k ∈ Q, the equality constraints in equations (4a)-(4d) and equations (5a)-(5c) have +full row rank, which readily imply that M(P, y) has full row rank. We ignore inequality constraints because +they are transformed into equality constraints by the addition of unique slack variables. +We begin by focusing on the equality constraints in (4a)-(4d) involving the x, w, and λ variables. Consider +a fixed (i, j) ∈ B. Since at most four of the λij variables may be nonzero, we can rewrite these equality +constraints as follows after a change of variables (here, A(i) denotes the active partition of xi): +� +� +� +� +� +−1 +0 +0 +pi +A(i)−1 +pi +A(i)−1 +pi +A(i) +pi +A(i) +0 +−1 +0 +pj +A(j)−1 +pj +A(j) +pj +A(j)−1 +pj +A(j) +0 +0 +−1 +pi +A(i)−1pj +A(j)−1 +pi +A(i)−1pj +A(j) +pi +A(i)pj +A(j)−1 +pi +A(i)pj +A(j) +0 +0 +0 +1 +1 +1 +1 +� +� +� +� +� +� +� +� +� +� +� +� +� +� +� +xi +xj +wij +λij +1 +λij +2 +λij +3 +λij +4 +� +� +� +� +� +� +� +� +� +� += +� +� +� +� +0 +0 +0 +1 +� +� +� +� . +We argue that the following sub-matrix is of full rank whenever P ∈ int(P): + + + + + +pi +A(i)−1 +pi +A(i)−1 +pi +A(i) +pi +A(i) +pj +A(j)−1 +pj +A(j) +pj +A(j)−1 +pj +A(j) +pi +A(i)−1pj +A(j)−1 +pi +A(i)−1pj +A(j) +pi +A(i)pj +A(j)−1 +pi +A(i)pj +A(j) +1 +1 +1 +1 + + + + + . +10 + +Subtracting the first column from the second column, the third from the fourth column, and finally the first +from the third column yields the column vectors +� +pi +A(i)−1, pj +A(j)−1, pi +A(i)−1pj +A(j)−1, 1 +� +, +� +0, (pj +A(j) − pj +A(j)−1), pi +A(i)−1(pj +A(j) − pj +A(j)−1), 0 +� +, +� +(pi +A(i) − pi +A(i)−1), 0, pj +A(j)−1(pi +A(i) − pi +A(i)−1), 0 +� +, +� +0, (pj +A(j) − pj +A(j)−1), pi +A(i)(pj +A(j) − pj +A(j)−1), 0 +� +. +It is easy to see these vectors are linearly independent if 0 < pi +A(i)−1 < pi +A(i) < 1, ∀i. +Next, we focus on the equality constraints in (5a)-(5c) involving the x, w, and λ variables for a fixed +k ∈ Q. 
Since at most two of the λk variables may be nonzero, we can rewrite these equality constraints as +follows after a change of variables: +�−1 +pk +A(k)−1 +pk +A(k) +0 +1 +1 +�  + +xk +λk +1 +λk +2 + + = +�0 +1 +� +. +The last two matrix columns are linearly independent if 0 < pk +A(k)−1 < pk +A(k) < 1. +Finally, we show that problem (7) has a unique y solution for almost every (a.e.) P ∈ P, which ensures +that Theorem 3 or 4 is applicable for a.e. P ∈ P. +Theorem 7. Suppose every optimal solution of (1) has at least one active inequality involving nonconvex +terms. Additionally, suppose the optimal value of the termwise McCormick relaxation (2) is strictly less +than v∗. Then problem (7) has a unique y solution for a.e. P ∈ P (with respect to the uniform measure on +P). +Proof. Let P ∈ P. Suppose problem (7) has optimal y solutions ˜y, ˆy ∈ Y with ˜y ̸= ˆy. Then, there exist +(non-singular) basis matrices ˜ +M(P, ˜y) and ˆ +M(P, ˆy) for the LPs (8) corresponding to ˜y and ˆy, respectively, +such that +v(P) = v(P, ˜y) = ˜cT[ ˜ +M(P, ˜y)]−1( ¯B˜y + ¯b) = ˆcT[ ˆ +M(P, ˆy)]−1( ¯Bˆy + ¯b) = v(P, ˆy) +(9) +for suitable vectors ˜c and ˆc, which only include the components of ¯c corresponding to the basic variables. +Because at every optimal solution of (1), at least one inequality involving nonconvex terms is active and not all +x variables are at their bounds (otherwise, the optimal value of (2) would equal v∗), we may assume without +loss of generality that some of the entries of either ˜ +M(P, ˜y) or ˆ +M(P, ˆy) are functions of P. Equation (9) +thus yields a polynomial equation in the partitioning points P. Therefore, the set of all P ∈ P such that (9) +holds has measure zero. Noting that |Y | < +∞ and the number of possible bases is finite for each y ∈ Y +concludes the proof. +4.2 +Algorithmic enhancements +We design preprocessing and postprocessing steps that can be used to mitigate the computational burden +of solving (SP) and enable our ML model to more effectively learn its solution. +The outer-maximization in problem (SP) involves n × d partitioning variables. Since larger problem +dimensions may increase both the per-iteration cost and number of iterations taken by the bundle solver to +converge, we propose preprocessing heuristics to fix a subset of the partitioning points P and to compute +an initial guess P 0 for the bundle method. After solving the max-min problem (SP) (line 17), we propose +postprocessing steps to eliminate partitioning points in its solution ¯P that do not significantly affect the lower +bound v( ¯P). Algorithm 1 includes detailed pseudocode of our preprocessing (lines 1–16) and postprocessing +steps (lines 18–27). +11 + +Algorithm 1 Preprocessing and postprocessing steps +Preprocessing steps +1: Initialize partitions P0 +i := [0, 1], ∀i ∈ [n] +2: Solve the McCormick relaxation (2) to compute a lower bounding solution ¯x0 +3: for k = 1, 2, . . ., d do +4: +for i = 1, 2, . . ., n do +5: +if ¯xk−1 +i +≈ ˜xi for some ˜xi ∈ Pk−1 +i +then +6: +Set Pk +i = Pk−1 +i +7: +else +8: +Insert ¯xk−1 +i +in Pk−1 +i +to obtain Pk +i +9: +end if +10: +end for +11: +Solve (PMR-OA) with partitions {Pk +i }i∈[n] to determine solution ¯xk +12: end for +13: Let ni := |Pd +i | − 2, ∀i ∈ [n] +14: Let [0, pi0 +d−ni+1, pi0 +d−ni+2, . . . 
, pi0 +d , 1] denote Pd +i and set pi0 +j := 0, ∀j ∈ [d − ni] +15: Set the initial guess for (SP) to P 0, where P 0 +ij := pi0 +j , ∀i ∈ [n], j ∈ [d] +16: Fix variables pi +j, j ∈ [d − ni], to 0 while solving (SP) +17: Solve max-min problem (SP) to obtain a solution ¯P ∈ P with objective ¯v := v( ¯P) +Postprocessing steps +18: for j = 1, 2, . . ., d do +19: +for i = 1, 2, . . ., n do +20: +Set ˆP = ¯P and replace the element ˆpi +j in ˆP with zero +21: +Solve (PMR-OA) with partitioning points ˆP to obtain bound ˆv := v( ˆP) +22: +if ˆv ≤ ¯v + 10−6|¯v| then +23: +Update ¯P = ˆP and sort it such that ¯Pij ≤ ¯Pij+1, ∀i ∈ [n], j ∈ [d − 1] +24: +end if +25: +end for +26: end for +27: Return the postprocessed solution ¯P +5 +Numerical experiments +We study the impact of using strong partitioning to specify Alpine’s variable partitions at the first iteration +and investigate an off-the-shelf ML model for learning these partitions for homogeneous QCQPs. We begin +by describing the setup for our computational experiments. In Section 5.1, we outline the procedure for +generating families of random QCQP instances, including instances of the pooling problem. We detail our +ML approximation of strong partitioning in Section 5.2, and compare the performance of strong partitioning +and its ML approximation against Alpine’s default partitioning strategy in Section 5.3. +Our strong partitioning code is written in Julia 1.6.3 and implemented within Alpine.jl v0.4.11. We use +JuMP.jl v1.1.1 and use Gurobi 9.1.2 via Gurobi.jl v0.11.3 for solving LPs, MILPs, and convex MIQCQPs +(with MIPGap = 10−6). To solve NLPs locally within Alpine2, we use either Ipopt 3.14.4 via Ipopt.jl v1.0.3 +(with max iter = 104), or Artelys Knitro 12.4.0 via KNITRO.jl v0.13.0 (with algorithm = 3). We use +the bundle solver MPBNGC 2.0 [48] via MPBNGCInterface.jl3 (with OPT LMAX = 20, OPT EPS = 10−9, and +1https://github.com/lanl-ansi/Alpine.jl +2We switch Alpine’s local solver between Ipopt for the random bilinear and QCQP instances and Knitro for the random +pooling instances because Ipopt is ineffective for the pooling instances. +3https://github.com/milzj/MPBNGCInterface.jl +12 + +OPT NITER = OPT NFASG = 500) to solve the max-min problem (SP) to local optimality. We consider strong +partitioning with either two or four partitioning points per partitioned variable in addition to the variable +bounds and use scikit-learn v0.23.2 [57] to design its ML approximation. To demonstrate the non-trivial +nature of our nonconvex test instances, we also solve them to global optimality using BARON 22.11.3 via +BARON.jl v0.8.0 and provide BARON with the option of using CPLEX 22.1.0 as an MILP solver. +All of our experiments were run on nodes of the Darwin cluster at LANL with dual socket Intel Broadwell +18-core processors (E5-2695 v4 CPUs, base clock rate at 2.1GHz), EDR InfiniBand, and 125GB of memory. +Each instance was run exclusively on a single node and different solution approaches were run in sequence +to limit the impact of variability in machine performance. All Alpine and BARON runs were given a time +limit of 2 hours with target relative and absolute optimality gaps of 10−4 and 10−9, respectively4. +No +time limit was specified for solving the max-min problem (SP). The rest of BARON’s options, including +range reduction options, were kept to default. 
We deactivate bounds tightening techniques within Alpine +because it is largely ineffective for our medium and large-scale instances (our approaches are easily adapted +to the setting where bounds tightening is employed). We partition the domains of all variables participating +in nonconvex terms within Alpine, and set the rest of Alpine’s options to default, including the partition +scaling factor to ∆ = 10. +5.1 +Test Instances +We describe how we generate homogeneous families of random QCQPs, including instances of the pooling +problem, based on the literature. Scripts for generating the different families of instances can be found at +https://github.com/lanl-ansi/Alpine.jl/tree/master/examples/random_QCQPs. +5.1.1 +Random bilinear programs +We consider parametric bilinear programs of the form [7]: +v(θ) := +min +x∈[0,1]n xTQ0(θ)x + (r0(θ))Tx +s.t. +xTQi(θ)x + (ri(θ))Tx ≤ bi, +∀i ∈ [mI], +(aj)Tx = dj, +∀j ∈ [mE], +where θ ∈ [−1, 1]dθ are parameters, rk(θ) ∈ Rn, k ∈ {0}∪[mI], Qk(θ) ∈ Rn×n, k ∈ {0}∪[mI], are symmetric +but not necessarily positive semi-definite, aj ∈ Rn, j ∈ [mE], b ∈ RmI, and d ∈ RmE. +We generate 1000 instances for each of n ∈ {10, 20, 50} variables with |B| = min{5n, +�n +2 +� +} bilinear terms +(we count xixj and xjxi as the same bilinear term; all instances for a fixed dimension n have the same +set of |B| bilinear terms), |Q| = 0 quadratic terms, mI = n bilinear inequalities, and mE = 0.2n linear +equalities [7]. We let the dimension dθ = 3 × (0.2mI + 1) (see below for why we make this choice). The +problem data is generated as follows (cf. [7]). All entries of the vectors aj and d are generated i.i.d. from +the uniform distribution U(−1, 1), and all entries of the vector b are generated i.i.d. from U(0, 100). The +components of θ are generated i.i.d. from U(−1, 1). Each Qk and rk, k ∈ {0, 1, . . ., 0.2mI}, are of the form: +Qk(θ) = ¯Qk + +3k+3 +� +l=3k+1 +θl ˜Qk,l−3k, +rk(θ) = ¯rk + +3k+3 +� +l=3k+1 +θl˜rk,l−3k. +The nonzero entries of the “nominal matrices” ¯Qk and “nominal vectors” ¯rk are generated i.i.d. from U(−1, 1). +For each tuple (i, j) ∈ B and indices k ∈ {0, 1, . . ., 0.2mI} and l ∈ {1, 2, 3}, we set ˜Qk,l +ij := γk,l +ij ¯Qk +ij, where +γk,l +ij +are generated i.i.d. from U(0, 0.5). +Similarly, for each index i ∈ {1, . . . , n}, k ∈ {0, 1, . . ., 0.2mI}, +and l ∈ {1, 2, 3}, we set ˜rk,l +i +:= δk,l +i ¯rk +i , where δk,l +i +are generated i.i.d. from U(0, 0.5). +Since each ˜Qk,l +and ˜rk,l is a different perturbation of ¯Qk and ¯rk, the expansions of Qk and rk may be motivated using +4Alpine’s definition of relative gap differs slightly from BARON’s definition, see Section 5.3.1. +13 + +principal components analysis. +The nonzero entries of the remaining matrices Qk and vectors rk, k ∈ +{0.2mI + 1, . . . , mI}, are the same across all 1000 instances and generated i.i.d. from U(−1, 1). Finally, the +constraint coefficients are re-scaled such that the vectors b = d = 1. Note that for a fixed dimension n, each +instance is uniquely specified by the parameters θ. +5.1.2 +Random QCQPs with bilinear and univariate quadratic terms +We also generate 1000 random QCQPs with |B| = min{5n, +�n +2 +� +} bilinear terms and |Q| = ⌊0.25n⌋ univariate +quadratic terms for each of n ∈ {10, 20, 50} variables (all instances for a fixed n have the same set of bilinear +and univariate quadratic terms). The coefficients of quadratic terms in the objective and constraints are +generated similarly to the coefficients of bilinear terms in Section 5.1.1. 
The rest of the model parameters +and problem data are also generated similarly as in Section 5.1.1. +5.1.3 +The pooling problem +The pooling problem is a classical example of a bilinear program introduced by Haverly [30]. It has several +important applications in process systems engineering, including petroleum refining [33, 66], natural gas +production [33, 42], and water treatment network design [10, 50, 59]. Its goal is to blend inputs of differing +qualities at intermediate pools to produce outputs that meet quality specifications while satisfying capacity +constraints at inputs, pools, and outputs. Solving the pooling problem is NP-hard [3]. +We consider instances of the pooling problem with 45 inputs, 15 pools, 30 outputs, and a single quality. +Each instance has 116 input-output arcs, 71 input-pool arcs, and 53 pool-output arcs, yielding 572 variables +and 621 constraints, including 360 linear constraints and 261 bilinear equations (with 124 variables involved +in bilinear terms). We use the pq-formulation of the pooling problem outlined in Section 2 of [47]. Note that +unlike the random bilinear instances in Section 5.1.1 where all of the original “x variables” participate in +bilinear terms, only 124 out of the 311 original variables in the pooling model participate in bilinear terms. +We first generate a nominal instance using the “random Haverly” instance generation approach5 in [47] +that puts together 15 perturbed copies of one of Haverly’s pooling instances [30] and adds 150 edges to it. +We modify the target output quality concentrations generated by [47] to construct harder instances. For +each output j, we compute the minimum cmin +j +and maximum cmax +j +input concentrations of the quality over +the subset of inputs from which there exists a path to output j. We then specify the lower and upper bound +on the quality concentration at output j to be cmin +j ++αj(cmax +j +−cmin +j +) and cmin +j ++βj(cmax +j +−cmin +j +), respectively, +where αj ∼ U(0.2, 0.4) and βj ∼ U(0.6, 0.8) are generated independently. We also rescale the capacities of +the inputs, pools, and outputs and the costs of the arcs for better numerical performance. Note that while +all variables in the formulation are non-negative, upper bounds on the variables are not necessarily equal to +one after rescaling. After constructing a nominal instance using the above procedure, we use it to generate +1000 random pooling instances by randomly perturbing each input’s quality concentration (parameters θ for +this problem family) by up to 20%, uniformly and independently. +5.2 +Machine learning approximation of strong partitioning +We detail our off-the-shelf ML approximation of strong partitioning in this section. Although our ultimate +goal is to optimize the ML model so that its predictions yield good performance when they are used to +inform Alpine’s partitions at the first iteration, we instead choose our ML model solely based on its accuracy +of predicting the strong partitioning points. We do so because tuning the hyperparameters of the ML model +directly for good performance within Alpine incurs a huge computational expense due to the need to re- +evaluate the performance of the ML predictions within Alpine for each choice of the hyperparameters. While +the choice of the ML model can have a significant impact on the performance of its predictions within Alpine, +we leave the design of more sophisticated ML architectures for future work. 
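For orientation, the following Python sketch shows what such an off-the-shelf approximation can look like, using the regressor configuration described in the next paragraphs (a depth-25 regression tree boosted by AdaBoost with up to 1000 weak learners, and 10-fold cross-validation). The feature matrix and targets below are random placeholders, the number of weak learners is reduced to keep the sketch quick, and wrapping the booster in MultiOutputRegressor to handle the d x n outputs is our assumption rather than the stated implementation.

import numpy as np
from sklearn.ensemble import AdaBoostRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import cross_val_predict

rng = np.random.default_rng(0)
X = rng.random((200, 30))        # placeholder features: theta, presolve solution, McCormick solution
Y = rng.random((200, 2 * 10))    # placeholder targets: d = 2 partitioning points for n = 10 variables

tree = DecisionTreeRegressor(max_depth=25)
booster = AdaBoostRegressor(estimator=tree, n_estimators=50)   # 1000 weak learners in the paper;
                                                               # keyword is 'base_estimator' in v0.23.2
model = MultiOutputRegressor(booster)                          # one boosted regressor per output (assumption)
Y_pred = cross_val_predict(model, X, Y, cv=10)                 # out-of-fold predictions, as described below
print(np.mean(np.abs(Y_pred - Y), axis=0))                     # per-partitioning-point MAE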
5.2 Machine learning approximation of strong partitioning

We detail our off-the-shelf ML approximation of strong partitioning in this section. Although our ultimate goal is to optimize the ML model so that its predictions yield good performance when they are used to inform Alpine's partitions at the first iteration, we instead choose our ML model solely based on its accuracy in predicting the strong partitioning points. We do so because tuning the hyperparameters of the ML model directly for good performance within Alpine incurs a huge computational expense, owing to the need to re-evaluate the performance of the ML predictions within Alpine for each choice of the hyperparameters. While the choice of the ML model can have a significant impact on the performance of its predictions within Alpine, we leave the design of more sophisticated ML architectures for future work.

We use scikit-learn's AdaBoost regressor (Footnote 6) [26], which implements the AdaBoost.R2 algorithm [23], to learn a mapping from each QCQP instance to the strong partitioning points. Our base estimator is a scikit-learn regression tree (Footnote 7) [15] with maximum depth equal to 25, and we set the maximum number of weak learners for the boosting algorithm to 1000. The rest of scikit-learn's AdaBoostRegressor options are set to their defaults. We use 10-fold cross-validation to generate out-of-sample ML predictions for all 1000 QCQP instances in each problem family. Specifically, we randomly split the 1000 instances in each family into 10 folds, use 9 out of the 10 folds to train the ML model, predict the strong partitioning points for the omitted fold, and loop through the different choices of the omitted fold to generate predictions for all 1000 instances. We emphasize that we fit our ML model for prediction accuracy and do not perform much hyperparameter tuning, since our ultimate goal is good performance of the ML predictions when they are used within Alpine.

Footnote 6: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostRegressor.html
Footnote 7: https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html

ML model inputs and outputs. The choice of features for the ML model can greatly impact its performance. We use the following problem features as inputs to the ML model: (i) the parameters θ, which uniquely parametrize each nonconvex QCQP instance, (ii) the best feasible solution found during Alpine's presolve step (which involves a single local solve), and (iii) the McCormick lower bounding solution (obtained by solving a single convex program). Although it is theoretically sufficient to use only the parameters θ as features, because they uniquely identify each QCQP instance, we also use features (ii) and (iii) since they are relatively cheap to compute and can intuitively help inform the partitioning strategy. These additional features are also complicated transformations of the instance parameters θ that may otherwise be challenging to uncover. The outputs of our ML model are the d partitioning points (excluding variable bounds) for each of the n partitioned variables, resulting in an output dimension of d × n. In contrast with much of the literature on learning for MILPs, we train separate ML models for each family of 1000 instances, since both the feature and output dimensions of our ML models depend on the problem dimensions. While we plan to design more advanced ML architectures that can accommodate variable feature and output dimensions as part of future work, we do not consider the need to train a different ML model for each problem family to be a major limitation. This is because decision-makers often care about solving instances of the same problem family with only a few varying parameters, which means they only need to train a single ML model with fixed feature and output dimensions for their application.
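The following scikit-learn sketch illustrates the training and 10-fold out-of-sample prediction pipeline just described. The file names are placeholders, and wrapping the boosted regressor in MultiOutputRegressor to handle the d × n outputs is our assumption; the paper does not state how the multi-output case is handled.

```python
import numpy as np
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import cross_val_predict

# X: (1000, n_features) instance features (theta, presolve solution, McCormick solution)
# Y: (1000, d * n) strong partitioning points computed offline for each instance
X = np.load("features.npy")     # hypothetical file names
Y = np.load("sp_points.npy")

base = AdaBoostRegressor(
    estimator=DecisionTreeRegressor(max_depth=25),  # 'base_estimator=' in older scikit-learn versions
    n_estimators=1000,
)
# AdaBoostRegressor is single-output, so one boosted model is fit per output coordinate.
model = MultiOutputRegressor(base)

# 10-fold cross-validation: each instance's prediction comes from a model
# trained on the other nine folds, i.e., out-of-sample as in the text.
Y_pred = cross_val_predict(model, X, Y, cv=10)
mae = np.abs(Y_pred - Y).mean(axis=0)   # divide by variable upper bounds (if != 1) to get scaled MAEs
```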
We now summarize the out-of-sample prediction errors of our trained ML models when they are used to predict two strong partitioning points per partitioned variable (excluding variable bounds). Table 1 provides statistics of the scaled mean absolute errors (MAEs) of the out-of-sample predictions of the 2n partitioning points (248 points for the pooling problem) produced by the ML model for each problem family. The MAEs of the predicted partitioning points are averaged over the 1000 instances in each family and scaled by the upper bounds of the corresponding x variables; these upper bounds are simply equal to one for the random bilinear and QCQP instances, but are greater than one for some of the partitioned variables in the pooling instances. Roughly 90% or more of the partitioning points predicted using ML have a scaled MAE of less than 10% for each problem family, which indicates that the same underlying ML model is able to generate reasonable predictions of the strong partitioning points across these different problem families.

% Partitioning Points | Scaled MAE < 0.01 | < 0.02 | < 0.05 | < 0.1 | < 0.2
Bilinear n = 10 | 60 | 75 | 80 | 95 | 100
Bilinear n = 20 | 15 | 22.5 | 60 | 87.5 | 97.5
Bilinear n = 50 | 31 | 39 | 70 | 94 | 100
QCQP n = 10 | 65 | 80 | 95 | 100 | 100
QCQP n = 20 | 35 | 37.5 | 77.5 | 92.5 | 100
QCQP n = 50 | 56 | 66 | 85 | 99 | 100
Pooling | 65.7 | 70.9 | 78.6 | 89.5 | 97.2

Table 1: Statistics of scaled MAEs of the out-of-sample predictions of the ML model. Entries give the percentage of partitioning points whose scaled MAE falls below each threshold.

Problem Family | Shifted GM | Median | Min | Max | # TLE | TLE Gap (GM)
Bilinear n = 10 | 0.2 | 0.2 | 0.1 | 0.4 | 0 | –
Bilinear n = 20 | 3.5 | 3.6 | 1.4 | 7.0 | 0 | –
Bilinear n = 50 | 257.1 | 260.6 | 54.3 | 4637.4 | 0 | –
QCQP n = 10 | 0.3 | 0.3 | 0.1 | 0.6 | 0 | –
QCQP n = 20 | 4.8 | 4.6 | 1.6 | 10.8 | 0 | –
QCQP n = 50 | 268.2 | 246.2 | 14.7 | 6897.9 | 19 | 2.3 × 10^-2
Pooling | 441.4 | 422.1 | 16.6 | 7114.5 | 432 | 2.7 × 10^-2

Table 2: Statistics of BARON solution times (in seconds), including the shifted geometric mean, median, minimum, and maximum times over the subset of 1000 instances for which BARON does not hit the time limit. The last two columns denote the number of instances for which BARON hits the time limit and the corresponding geometric mean of residual optimality gaps at termination, respectively.

Solution Method | Shifted GM | Median | Min | Max | # TLE
Alpine (default) | 30.7 | 14.1 | 0.4 | 4020.9 | 2
Alpine+SP2 | 9.4 | 1.7 | 0.2 | 3864.9 | 1
Alpine+SP4 | 5.8 | 1.4 | 0.2 | 3871.3 | 0

Table 3: (Benchmark QCQPs) Statistics of solution times (in seconds). Columns correspond to the shifted geometric mean, median, minimum, and maximum times over the subset of 140 instances that do not hit the time limit. The last column denotes the number of instances for which each method hits the time limit.

5.3 Results and discussion

We begin by benchmarking the hardness of our instances using BARON. We then compare the performance of default Alpine with the use of strong partitioning and its ML approximation (described in Section 5.2) within Alpine through a few metrics. All reported times are in seconds and do not include the time for solving the max-min problem (SP) or training the ML model.

5.3.1 Benchmarking using BARON

To illustrate the non-trivial nature of our instances, we present statistics of their run times using BARON in Table 2. BARON solves the 10-variable and 20-variable random bilinear and QCQP instances within seconds, but takes over 4 minutes on average to solve the 50-variable instances and times out on 19/1000 of the 50-variable QCQP instances. BARON finds the random pooling instances to be significantly harder, timing out on 432/1000 instances (Footnote 8) and taking roughly 7 minutes on average to solve the remaining 568/1000 instances. As suggested in the literature, we use the shifted geometric mean as one of the metrics to compare the solution times of different algorithms on a family of test instances.

Footnote 8: BARON finds global solutions but is unable to prove global optimality within the time limit.
The shifted geometric mean (shifted GM) of a positive vector t ∈ R^N_+ is defined as (Footnote 9)

    \text{Shifted GM}(t) = \exp\left( \frac{1}{N} \sum_{i=1}^{N} \ln\big( \max(1,\, t_i + \mathrm{shift}) \big) \right) - \mathrm{shift},

where we set shift = 10 when comparing solution times in seconds. The last column in Table 2 notes the GM of the relative optimality gap at termination for instances where BARON hits the time limit. Following the definition of the relative optimality gap in Alpine, this residual optimality gap is defined as (UB − LB) / (10^-6 + |UB|), where UB and LB are the upper and lower bounds returned by BARON at termination. We emphasize that our goal is not to compare the different versions of Alpine with BARON, but rather to illustrate that our instances and accelerations of Alpine are non-trivial.

Footnote 9: http://plato.asu.edu/ftp/shgeom.html

5.3.2 Evaluating strong partitioning on benchmark QCQPs

We compare Alpine's default partitioning strategy with the use of two or four strong partitioning points excluding the bounds (Alpine+SP2 and Alpine+SP4, respectively) on a subset of BARON's QCQP test library (Footnote 10) [7]. Specifically, we only consider the 140 QCQP instances from Bao et al. [7] with 20 variables in order to keep the time for solving the max-min problem manageable. Table 3 presents statistics of the run times of default Alpine, Alpine+SP2, and Alpine+SP4 on these 140 instances. Alpine+SP2 and Alpine+SP4 reduce the shifted GM of Alpine's solution time by factors of 3.3 and 5.3 (Footnote 11), respectively, which indicates that strong partitioning has the potential to yield significant speedups on broad families of QCQPs. Table 8 reports statistics of the solution times for the max-min problem over these 140 instances.

Footnote 10: These 140 "qcqp2" instances are from https://minlp.com/nlp-and-minlp-test-problems.
Footnote 11: These factors correspond to 69.7% and 81.1% average reductions in Alpine's solution times.

5.3.3 Evaluating the performance of strong partitioning and its ML approximation

We compare Alpine's default partitioning strategy with the use of two strong partitioning points (excluding variable bounds) per partitioned variable (Alpine+SP2) and its ML approximation (Alpine+ML2) in Alpine's first iteration. For the cases with n = 20, we also compare the above approaches with the use of four strong partitioning points (excluding the bounds) per partitioned variable (Alpine+SP4) and its ML approximation (Alpine+ML4) at Alpine's first iteration. We compare these methods for each family of instances using two metrics: (i) statistics of solution times, and (ii) statistics of the effective optimality gap after Alpine's first iteration. We define the effective relative optimality gap as

    \text{Effective Optimality Gap} = \max\left\{ 10^{-4},\; \frac{v^* - v^{\mathrm{LBD}}}{10^{-6} + |v^*|} \right\}, \qquad (10)

where v^* is the optimal objective value, v^{LBD} is Alpine's lower bound after one iteration (using any one of the different approaches for specifying partitions), and 10^-4 is the target optimality gap. By measuring the gap of v^{LBD} relative to the optimal objective value v^* instead of the best found feasible solution, we do not let the performance of the local solver impact our evaluation of the different partitioning methods. Thresholding the optimality gap at 10^-4 also lends equal importance to all optimality gaps below the target, since any such gap is sufficient for Alpine to converge.
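For reference, the two evaluation metrics just defined can be computed as follows (a minimal sketch; the function names are ours).

```python
import numpy as np

def shifted_geometric_mean(times, shift=10.0):
    """Shifted geometric mean of solution times (seconds), as defined above."""
    t = np.asarray(times, dtype=float)
    return np.exp(np.mean(np.log(np.maximum(1.0, t + shift)))) - shift

def effective_optimality_gap(v_opt, v_lbd, target=1e-4):
    """Effective relative optimality gap (10) after one Alpine iteration."""
    return max(target, (v_opt - v_lbd) / (1e-6 + abs(v_opt)))

# example: shifted GM of three hypothetical run times
print(shifted_geometric_mean([0.5, 12.0, 300.0]))
```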
Table 4 presents statistics of the run times of default Alpine, Alpine with the different versions of strong partitioning at the first iteration, and Alpine with the different ML approximations of strong partitioning at the first iteration for the different problem families. Table 5 records the speedup/slowdown of the different versions of Alpine+SP and Alpine+ML over default Alpine. Table 6 presents statistics of the effective optimality gaps (10) of the different approaches after one iteration, whereas Table 7 notes the GM of the residual effective optimality gaps on instances for which the different approaches hit the time limit. Table 8 reports statistics of the solution times for the max-min problem for the different problem families. Figures 2, 3, and 4 plot solution profiles and histograms of the factor improvements of the effective optimality gaps for the bilinear, QCQP, and pooling families. We do not plot performance profiles due to their known issues (see http://plato.asu.edu/bench.html).

Problem Family | Solution Method | Shifted GM | Median | Min | Max | # TLE
Bilinear n = 10 | Alpine (default) | 0.51 | 0.47 | 0.14 | 2.41 | 0
Bilinear n = 10 | Alpine+SP2 | 0.11 | 0.10 | 0.06 | 0.28 | 0
Bilinear n = 10 | Alpine+ML2 | 0.15 | 0.10 | 0.06 | 1.64 | 0
Bilinear n = 20 | Alpine (default) | 21.4 | 21.9 | 5.1 | 161.5 | 0
Bilinear n = 20 | Alpine+SP2 | 4.2 | 2.0 | 0.8 | 132.6 | 0
Bilinear n = 20 | Alpine+ML2 | 10.0 | 7.8 | 1.1 | 116.0 | 0
Bilinear n = 20 | Alpine+SP4 | 2.4 | 1.9 | 0.8 | 94.2 | 0
Bilinear n = 20 | Alpine+ML4 | 9.3 | 7.2 | 1.0 | 117.4 | 0
Bilinear n = 50 | Alpine (default) | 405.9 | 336.2 | 48.0 | 7135.9 | 24
Bilinear n = 50 | Alpine+SP2 | 52.8 | 34.9 | 4.2 | 5705.1 | 4
Bilinear n = 50 | Alpine+ML2 | 101.6 | 83.6 | 6.6 | 7071.7 | 5
QCQP n = 10 | Alpine (default) | 0.85 | 0.81 | 0.62 | 2.29 | 0
QCQP n = 10 | Alpine+SP2 | 0.10 | 0.09 | 0.07 | 0.27 | 0
QCQP n = 10 | Alpine+ML2 | 0.27 | 0.12 | 0.07 | 2.89 | 0
QCQP n = 20 | Alpine (default) | 40.1 | 35.6 | 4.6 | 241.1 | 0
QCQP n = 20 | Alpine+SP2 | 7.7 | 1.7 | 0.8 | 135.4 | 0
QCQP n = 20 | Alpine+ML2 | 13.0 | 9.5 | 1.0 | 180.1 | 0
QCQP n = 20 | Alpine+SP4 | 2.4 | 1.5 | 0.7 | 125.7 | 0
QCQP n = 20 | Alpine+ML4 | 9.4 | 6.4 | 0.9 | 101.2 | 0
QCQP n = 50 | Alpine (default) | 391.5 | 289.1 | 36.6 | 7198.2 | 0
QCQP n = 50 | Alpine+SP2 | 63.3 | 51.9 | 4.2 | 6055.2 | 0
QCQP n = 50 | Alpine+ML2 | 100.5 | 118.2 | 5.3 | 6514.2 | 0
Pooling | Alpine (default) | 242.8 | 212.5 | 25.9 | 7091.9 | 7
Pooling | Alpine+SP2 | 66.7 | 49.7 | 1.6 | 6127.1 | 5
Pooling | Alpine+ML2 | 117.1 | 101.9 | 11.4 | 6097.0 | 1

Table 4: (Solution Times) Statistics of solution times (in seconds). Columns correspond to the shifted geometric mean, median, minimum, and maximum times over the subset of 1000 instances that did not hit the time limit. The last column denotes the number of instances for which each method hits the time limit.

Bilinear Instances. Table 4 implies that Alpine+SP2 reduces the shifted GM of default Alpine's solution time by factors of 4.5, 5.1, and 7.7, respectively, for n = 10, n = 20, and n = 50 over 1000 instances. Alpine+ML2 generates a moderate approximation of Alpine+SP2 overall, reducing the shifted GM of default Alpine's solution time by factors of 3.5, 2.1, and 4, respectively, for n = 10, n = 20, and n = 50 over the same 1000 instances. For the n = 20 instances, Alpine+SP4 and Alpine+ML4 reduce the shifted GM of default Alpine's solution time by factors of 9 and 2.3, respectively. Table 5 implies that Alpine+SP2 results in at least a 5× speedup over default Alpine on 41.3% of the n = 10 instances, and in at least a 10× speedup on 39.9% and 46.1% of the n = 20 and n = 50 instances, respectively. On the other hand, Alpine+ML2 yields at least a 5× speedup over default Alpine on 40.1%, 22.2%, and 45.2% of the n = 10, n = 20, and n = 50 instances.
Alpine+SP4 results in at least a 10× speedup over default Alpine on 51.7% of the n = 20 instances. Finally, Alpine+SP2 results in maximum speedups of 15×, 49×, and 685× for the n = 10, n = 20, and n = 50 instances, whereas Alpine+ML2 results in maximum speedups of 13×, 38×, and 197× for the same sets of instances.

[Figure 2: three solution-profile panels (n = 10, 20, 50; x-axis: time T in seconds, y-axis: % of instances solved within time T; curves for Default, SP2, and ML2, plus SP4 and ML4 for n = 20) and three histogram panels (x-axis: gap reduction factor after the 1st iteration, y-axis: % of instances; Default/SP2 and Default/ML2).]
Figure 2: (Bilinear Instances) Top row: solution profiles indicating the percentage of instances solved by the different methods within time T seconds (higher is better). Bottom row: histograms of the ratios of the effective optimality gaps (10) of default Alpine with Alpine+SP2 and with Alpine+ML2 after one iteration (larger gap reduction factors are better).

[Figure 3: same layout as Figure 2 for the QCQP instances.]
Figure 3: (QCQP Instances) Top row: solution profiles indicating the percentage of instances solved by the different methods within time T seconds (higher is better). Bottom row: histograms of the ratios of the effective optimality gaps (10) of default Alpine with Alpine+SP2 and with Alpine+ML2 after one iteration (larger gap reduction factors are better).

[Figure 4: one solution-profile panel and one gap-reduction histogram for the pooling instances.]
Figure 4: (Pooling Instances) Left plot: solution profile indicating the percentage of instances solved by the different methods within time T seconds (higher is better). Right plot: histograms of the ratios of the effective optimality gaps (10) of default Alpine with Alpine+SP2 and with Alpine+ML2 after one iteration (larger gap reduction factors are better).

Problem Family | Method | < 0.5 | 0.5–1 | 1–2 | 2–5 | 5–10 | 10–20 | 20–50 | > 50
Bilinear n = 10 | % Alpine+SP2 inst. | – | – | 1.1 | 57.6 | 40.1 | 1.2 | 0 | 0
Bilinear n = 10 | % Alpine+ML2 inst. | 0.2 | 2.1 | 7.7 | 49.9 | 40.0 | 0.1 | 0 | 0
Bilinear n = 20 | % Alpine+SP2 inst. | 0.2 | 3.3 | 7.2 | 18.2 | 31.2 | 29.9 | 10.0 | 0.0
Bilinear n = 20 | % Alpine+ML2 inst. | 3.3 | 9.8 | 25.5 | 39.2 | 15.3 | 6.0 | 0.9 | 0.0
Bilinear n = 20 | % Alpine+SP4 inst. | 0.2 | 0.7 | 1.3 | 13.4 | 32.7 | 37.1 | 14.5 | 0.1
Bilinear n = 20 | % Alpine+ML4 inst. | 2.8 | 10.5 | 23.3 | 41.4 | 15.2 | 5.9 | 0.9 | 0.0
Bilinear n = 50 | % Alpine+SP2 inst. | 0.4 | 1.3 | 7.2 | 18.7 | 26.3 | 24.3 | 14.9 | 6.9
Bilinear n = 50 | % Alpine+ML2 inst. | 0.7 | 4.7 | 16.9 | 32.5 | 25.3 | 13.7 | 5.4 | 0.8
QCQP n = 10 | % Alpine+SP2 inst. | – | – | 0.1 | 3.3 | 76.1 | 20.4 | 0.1 | 0
QCQP n = 10 | % Alpine+ML2 inst. | 1.0 | 3.9 | 20.9 | 8.5 | 53.4 | 12.3 | 0 | 0
QCQP n = 20 | % Alpine+SP2 inst. | 0.1 | 3.2 | 12.2 | 18.4 | 11.5 | 19.4 | 32.6 | 2.6
QCQP n = 20 | % Alpine+ML2 inst. | 0.5 | 5.1 | 19.0 | 40.7 | 23.1 | 9.6 | 1.9 | 0.1
QCQP n = 20 | % Alpine+SP4 inst. | 0 | 0.2 | 1.3 | 3.8 | 5.5 | 28.2 | 53.7 | 7.3
QCQP n = 20 | % Alpine+ML4 inst. | 0 | 2.9 | 11.6 | 33.3 | 27.0 | 17.2 | 7.6 | 0.4
QCQP n = 50 | % Alpine+SP2 inst. | 0.9 | 1.3 | 10.7 | 22.0 | 23.0 | 32.5 | 7.2 | 2.4
QCQP n = 50 | % Alpine+ML2 inst. | 1.4 | 4.0 | 19.5 | 32.4 | 22.7 | 16.6 | 3.4 | 0
Pooling | % Alpine+SP2 inst. | 2.2 | 6.4 | 19.7 | 26.0 | 21.8 | 16.8 | 6.7 | 0.4
Pooling | % Alpine+ML2 inst. | 2.1 | 11.5 | 34.5 | 40.4 | 9.8 | 1.4 | 0.3 | 0

Table 5: (Speedup/Slowdown) Statistics of the speedup/slowdown of the different versions of Alpine with SP and its ML approximation (relative to default Alpine). Columns are speedup-factor buckets and entries are percentages of the 1000 instances; "–" marks cells left empty in the original table.

Table 6 implies that Alpine+SP2 reduces the GM of default Alpine's effective optimality gap (10) after the first iteration by factors of 5.5, 2200, and 80, respectively, for n = 10, n = 20, and n = 50. Alpine+ML2 reduces the GM of default Alpine's effective gap after the first iteration by factors of 4.6, 180, and 15, respectively, for the n = 10, n = 20, and n = 50 instances. Interestingly, Alpine+SP2 closes the effective gap in the first iteration for 100%, 82.3%, and 46% of the n = 10, n = 20, and n = 50 instances, whereas default Alpine closes the gap in the first iteration for at most 0.1% of the instances in these problem families, which demonstrates the effectiveness of the strong partitioning strategy. Finally, Table 7 shows that Alpine+SP2 and Alpine+ML2 terminate with smaller average optimality gaps than default Alpine on the n = 50 instances where they time out.

QCQP Instances. Table 4 implies that Alpine+SP2 reduces the shifted GM of default Alpine's solution time by factors of 8.4, 5.2, and 6.2, respectively, for n = 10, n = 20, and n = 50. Alpine+ML2 generates a moderate approximation of Alpine+SP2, reducing the shifted GM of default Alpine's solution time by factors of 3.1, 3.1, and 3.9, respectively, for n = 10, n = 20, and n = 50 over the same 1000 instances. For the n = 20 instances, Alpine+SP4 and Alpine+ML4 reduce the shifted GM of default Alpine's solution time by factors of 16.4 and 4.3, respectively. Table 5 implies that Alpine+SP2 results in at least a 10× speedup over default Alpine on 20.5%, 54.6%, and 42.1% of the n = 10, n = 20, and n = 50 instances, respectively. On the other hand, Alpine+ML2 yields at least a 5× speedup over default Alpine on 65.7%, 34.7%, and 42.7% of the n = 10, n = 20, and n = 50 instances. Alpine+SP4 results in at least a 20× speedup over default Alpine on 61% of the n = 20 instances. Finally, Alpine+SP2 results in maximum speedups of 22×, 87×, and 98× for the n = 10, n = 20, and n = 50 instances, whereas Alpine+ML2 results in maximum speedups of 19×, 56×, and 32× for the same sets of instances.
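The speedup/slowdown buckets reported in Table 5 can be reproduced from per-instance solution times with a few lines of NumPy; the .npy file names below are hypothetical.

```python
import numpy as np

# hypothetical per-instance solution times (seconds) for default Alpine and Alpine+SP2
t_default = np.load("alpine_default_times.npy")
t_sp2 = np.load("alpine_sp2_times.npy")

speedup = t_default / t_sp2
edges = [0, 0.5, 1, 2, 5, 10, 20, 50, np.inf]    # bucket boundaries used in Table 5
labels = ["<0.5", "0.5-1", "1-2", "2-5", "5-10", "10-20", "20-50", ">50"]
counts, _ = np.histogram(speedup, bins=edges)
for lab, c in zip(labels, counts):
    print(f"{lab:>6}: {100.0 * c / speedup.size:.1f}% of instances")
```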
Problem Family | Solution Method | GM | Median | Min | Max | % Instances Gap Closed
Bilinear n = 10 | Alpine (default) | 5.5 × 10^-4 | 4.5 × 10^-4 | 10^-4 | 3.4 × 10^-2 | 0.1
Bilinear n = 10 | Alpine+SP2 | 10^-4 | 10^-4 | 10^-4 | 10^-4 | 100
Bilinear n = 10 | Alpine+ML2 | 1.2 × 10^-4 | 10^-4 | 10^-4 | 4.4 × 10^-2 | 88.3
Bilinear n = 20 | Alpine (default) | 2.9 × 10^-1 | 3.3 × 10^-1 | 7.3 × 10^-2 | 4.8 × 10^-1 | 0
Bilinear n = 20 | Alpine+SP2 | 1.3 × 10^-4 | 10^-4 | 10^-4 | 6 × 10^-3 | 82.3
Bilinear n = 20 | Alpine+ML2 | 1.6 × 10^-3 | 1.9 × 10^-3 | 10^-4 | 1.4 × 10^-1 | 18.9
Bilinear n = 20 | Alpine+SP4 | 1.0 × 10^-4 | 10^-4 | 10^-4 | 4.7 × 10^-4 | 96.0
Bilinear n = 20 | Alpine+ML4 | 2.2 × 10^-3 | 3.6 × 10^-3 | 10^-4 | 9.9 × 10^-2 | 14.5
Bilinear n = 50 | Alpine (default) | 1.4 × 10^-2 | 1.7 × 10^-2 | 10^-4 | 6.9 × 10^-2 | 0.1
Bilinear n = 50 | Alpine+SP2 | 1.7 × 10^-4 | 1.2 × 10^-4 | 10^-4 | 5.4 × 10^-1 | 46.0
Bilinear n = 50 | Alpine+ML2 | 9.5 × 10^-4 | 9.4 × 10^-4 | 10^-4 | 4.9 × 10^-1 | 5.6
QCQP n = 10 | Alpine (default) | 1.3 × 10^-3 | 1.2 × 10^-3 | 7.5 × 10^-4 | 1.9 × 10^-2 | 0
QCQP n = 10 | Alpine+SP2 | 10^-4 | 10^-4 | 10^-4 | 10^-4 | 100
QCQP n = 10 | Alpine+ML2 | 3.0 × 10^-4 | 10^-4 | 10^-4 | 1.3 × 10^-1 | 71.8
QCQP n = 20 | Alpine (default) | 6.3 × 10^-2 | 7.8 × 10^-2 | 3.0 × 10^-3 | 2.1 × 10^-1 | 0
QCQP n = 20 | Alpine+SP2 | 2.1 × 10^-4 | 10^-4 | 10^-4 | 6.6 × 10^-3 | 52.2
QCQP n = 20 | Alpine+ML2 | 2.0 × 10^-3 | 2.5 × 10^-3 | 10^-4 | 5.8 × 10^-2 | 2.0
QCQP n = 20 | Alpine+SP4 | 1.1 × 10^-4 | 10^-4 | 10^-4 | 3.6 × 10^-3 | 92.6
QCQP n = 20 | Alpine+ML4 | 1.5 × 10^-3 | 1.7 × 10^-3 | 10^-4 | 6.7 × 10^-2 | 14.7
QCQP n = 50 | Alpine (default) | 8.1 × 10^-3 | 1.0 × 10^-2 | 6.3 × 10^-4 | 2.8 × 10^-2 | 0
QCQP n = 50 | Alpine+SP2 | 1.6 × 10^-4 | 1.3 × 10^-4 | 10^-4 | 1.0 × 10^-3 | 39.0
QCQP n = 50 | Alpine+ML2 | 4.8 × 10^-4 | 5.3 × 10^-4 | 10^-4 | 1.5 × 10^-2 | 14.9
Pooling | Alpine (default) | 6.8 × 10^-3 | 6.4 × 10^-3 | 1.2 × 10^-3 | 4.4 × 10^-2 | 0
Pooling | Alpine+SP2 | 2.4 × 10^-4 | 1.4 × 10^-4 | 10^-4 | 3.1 × 10^-3 | 45.2
Pooling | Alpine+ML2 | 1.5 × 10^-3 | 1.6 × 10^-3 | 10^-4 | 6.3 × 10^-3 | 0.1

Table 6: (Effective Optimality Gaps) Statistics of effective optimality gaps (10) after one iteration (note: minimum possible value = 10^-4, the target gap). Columns record the geometric mean, median, minimum, and maximum effective gaps over 1000 instances. The last column is the percentage of instances for which each method results in the minimum possible effective optimality gap of 10^-4 after one iteration.

Table 6 implies Alpine+SP2 reduces the GM of default Alpine's effective optimality gap (10) after the first iteration by factors of 13, 300, and 50, respectively, for the n = 10, n = 20, and n = 50 instances. On the other hand, Alpine+ML2 reduces the GM of default Alpine's effective gap after the first iteration by factors of 4.3, 31, and 17, respectively, for n = 10, n = 20, and n = 50. Note that Alpine+SP2 is able to close the effective gap in the first iteration for 100%, 52.2%, and 39% of the n = 10, n = 20, and n = 50 instances, whereas default Alpine is unable to close the gap in the first iteration for any instance in these problem families.

Pooling Instances. Table 4 implies Alpine+SP2 and Alpine+ML2 reduce the shifted GM of default Alpine's solution time by factors of 3.6 and 2.1 over the 1000 instances. Table 5 implies Alpine+SP2 and Alpine+ML2 result in at least a 5× speedup over default Alpine on 45.7% and 11.5% of the instances, respectively. Table 6 implies Alpine+SP2 and Alpine+ML2 reduce the GM of default Alpine's effective optimality gap (10) after the first iteration by factors of 28 and 4.5, respectively.
After the first iteration, Alpine+SP2 closes the effective optimality gap for 45.2% of the instances, whereas default Alpine is unable to close the gap for any of the 1000 instances. Finally, Alpine+SP2 and Alpine+ML2 result in maximum speedups of 120× and 41×.

Problem Family | Method | TLE Gap (GM)
Bilinear n = 50 | Alpine (default) | 4.4 × 10^-4
Bilinear n = 50 | Alpine+SP2 | 1.6 × 10^-4
Bilinear n = 50 | Alpine+ML2 | 1.8 × 10^-4
Pooling | Alpine (default) | 2.9 × 10^-4
Pooling | Alpine+SP2 | 2.1 × 10^-4
Pooling | Alpine+ML2 | 2.8 × 10^-4

Table 7: (Effective TLE Optimality Gaps) Geometric mean of residual effective optimality gaps (target = 10^-4) on instances for which the methods hit the time limit.

Problem Family | Solution Method | Shifted GM | Median | Min | Max | Std. Dev.
Bilinear n = 10 | SP2 | 16 | 14 | 6 | 96 | 13
Bilinear n = 20 | SP2 | 528 | 445 | 136 | 2389 | 544
Bilinear n = 20 | SP4 | 1244 | 1117 | 374 | 4360 | 893
Bilinear n = 50 | SP2 | 7070 | 7404 | 1271 | 23166 | 3268
QCQP n = 10 | SP2 | 8 | 8 | 6 | 53 | 3
QCQP n = 20 | SP2 | 1731 | 1826 | 171 | 4244 | 654
QCQP n = 20 | SP4 | 2152 | 2740 | 471 | 5965 | 961
QCQP n = 50 | SP2 | 16964 | 17074 | 8626 | 23551 | 2319
Pooling | SP2 | 15658 | 15148 | 1088 | 77029 | 8657
Benchmark QCQPs | SP2 | 413 | 364 | 7 | 27907 | 4432
Benchmark QCQPs | SP4 | 895 | 651 | 12 | 136320 | 15444

Table 8: (Max-Min Solution Times) Statistics of max-min solution times (in seconds). Columns correspond to the shifted geometric mean, median, minimum, maximum, and standard deviation of the times for solving the max-min problem (SP).

In summary, Tables 4 to 6 and Figures 2 to 4 clearly show the benefits of strong partitioning and its ML approximation over Alpine's default partitioning strategy. They also demonstrate that Alpine+SP and Alpine+ML are able to match or even outperform (particularly on the pooling instances) the performance of the state-of-the-art solver BARON (with default options) on average over the different problem families. While our off-the-shelf ML model yields a moderate approximation of SP across these different problem families, there is clear scope for significant improvement with tailored ML approaches.

6 Future work

There are several interesting avenues for future work. First, instead of prespecifying the number of partitioning points per variable for SP, we could allocate a different number of partitions per variable based on their relative impact on the lower bound. Suppose we wish to specify at most d + 2 partitioning points for each variable and are given a budget B ∈ [d × n] for the total number of partitioning points across all variables (excluding variable bounds). We can solve the following max-min problem to determine both the optimal allocation of partitions and the optimal specification of partitioning points across the partitioned variables:

    \max_{(P, Z) \in \mathcal{P}_Z} \; v(P),

where P := (p^1, p^2, ..., p^n) denotes the (potential) partitioning points, v(P) is defined in (PMR-OA), and Z := (z^1, z^2, ..., z^n) is a d × n matrix of binary decisions. The partitioning point p^i_j is added to the partition P_i of x_i only if the variable z^i_j takes the value 1. Finally, the MILP-representable set \mathcal{P}_Z is defined as

    \mathcal{P}_Z := \Big\{ (P, Z) \in \mathcal{P} \times \{0,1\}^{d \times n} : \sum_{i=1}^{n} \sum_{j=1}^{d} z^i_j = B, \;\; z^i_j = 0 \implies p^i_j = 0, \;\; \forall (i, j) \in [n] \times [d] \Big\}.

If z^i_j = 0, then the partitioning point p^i_j is made redundant by forcing it to 0. Note that, unlike the strong partitioning problem (SP), the above outer-maximization problem involves binary decision variables Z, which necessitates new techniques for its solution.
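As one possible starting point, the set \mathcal{P}_Z can be modeled with a big-M linking constraint in place of the indicator z^i_j = 0 ⇒ p^i_j = 0. The gurobipy sketch below is our illustration (the paper does not prescribe a modeling layer); it assumes unit upper bounds on the partitioning points and does not model the outer max-min objective v(P).

```python
import gurobipy as gp
from gurobipy import GRB

def build_partition_allocation_set(n, d, budget, p_ub=1.0):
    """Sketch of the feasible set P_Z from the budgeted variant above.
    Only the (P, Z) constraints are modeled; v(P) would still have to be attached."""
    m = gp.Model("P_Z")
    p = m.addVars(n, d, lb=0.0, ub=p_ub, name="p")      # candidate partitioning points
    z = m.addVars(n, d, vtype=GRB.BINARY, name="z")     # allocation decisions
    m.addConstr(z.sum() == budget, name="budget")       # total number of active points
    # big-M linking: z[i,j] = 0 forces p[i,j] = 0 (the indicator constraint in the text)
    m.addConstrs((p[i, j] <= p_ub * z[i, j] for i in range(n) for j in range(d)), name="link")
    return m, p, z
```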
Second, designing more efficient approaches for solving the strong partitioning problem (SP) would help scale our approach to larger problem dimensions (and also make it easier to generate more training data). Third, designing tailored ML architectures that can achieve speedups similar to strong partitioning and can accommodate variable feature and output dimensions merits investigation. Fourth, motivated by the cluster problem [34, 35], it would be interesting to explore variants that choose a different subset of variables to be partitioned at each iteration within Alpine. Finally, using strong partitioning to choose Alpine's partitions at the second iteration and beyond can help its bounds converge in fewer iterations.

Acknowledgments

The authors gratefully acknowledge funding from Los Alamos National Laboratory's (LANL's) "Center for Nonlinear Studies" and the U.S. Department of Energy's "Laboratory Directed Research and Development (LDRD)" program under the projects "20230091ER: Learning to Accelerate Global Solutions for Non-convex Optimization" and "20210078DR: The Optimization of Machine Learning: Imposing Requirements on Artificial Intelligence." This research used resources provided by LANL's Darwin testbed, which is funded by the Computational Systems and Software Environments subprogram of LANL's Advanced Simulation and Computing program (NNSA/DOE).

References

[1] T. Achterberg. Constraint integer programming. PhD thesis, Technische Universität Berlin, 2007.
[2] F. A. Al-Khayyal and J. E. Falk. Jointly constrained biconvex programming. Mathematics of Operations Research, 8(2):273–286, 1983.
[3] M. Alfaki and D. Haugland. A multi-commodity flow formulation for the generalized pooling problem. Journal of Global Optimization, 56(3):917–937, 2013.
[4] A. M. Alvarez, Q. Louveaux, and L. Wehenkel. A machine learning-based approximation of strong branching. INFORMS Journal on Computing, 29(1):185–195, 2017.
[5] M.-F. Balcan, T. Dick, T. Sandholm, and E. Vitercik. Learning to branch. In International Conference on Machine Learning, pages 344–353. PMLR, 2018.
[6] R. Baltean-Lugojan, P. Bonami, R. Misener, and A. Tramontani. Scoring positive semidefinite cutting planes for quadratic optimization via trained neural networks. Optimization Online, 2019. URL https://optimization-online.org/2018/11/6943/.
[7] X. Bao, N. V. Sahinidis, and M. Tawarmalani. Semidefinite relaxations for quadratically constrained quadratic programming: A review and comparisons. Mathematical Programming, 129(1):129–157, 2011.
[8] P. Belotti, J. Lee, L. Liberti, F. Margot, and A. Wächter. Branching and bounds tightening techniques for non-convex MINLP. Optimization Methods & Software, 24(4-5):597–634, 2009.
[9] Y. Bengio, A. Lodi, and A. Prouvost. Machine learning for combinatorial optimization: a methodological tour d'horizon. European Journal of Operational Research, 290(2):405–421, 2021.
[10] M. L. Bergamini, I. Grossmann, N. Scenna, and P. Aguirre. An improved piecewise outer-approximation algorithm for the global optimization of MINLP models involving concave and bilinear terms. Computers & Chemical Engineering, 32(3):477–493, 2008.
[11] K. Bestuzheva, M. Besançon, W.-K. Chen, A. Chmiela, T. Donkiewicz, J. van Doornmalen, L. Eifler, O. Gaul, G. Gamrath, A. Gleixner, et al. The SCIP optimization suite 8.0. arXiv preprint: 2112.08872, 2021.
[12] D. Bienstock, M. Escobar, C. Gentile, and L. Liberti.
Mathematical programming formulations for the alternating current +optimal power flow problem. Annals of Operations Research, pages 1–39, 2022. +24 + +[13] A. Billionnet, S. Elloumi, and A. Lambert. Extending the QCR method to general mixed-integer programs. Mathematical +Programming, 131(1):381–401, 2012. +[14] P. Bonami, A. Lodi, and G. Zarpellon. Learning a classification of mixed-integer quadratic programming problems. In +International Conference on the Integration of Constraint Programming, Artificial Intelligence, and Operations Research, +pages 595–604. Springer, 2018. +[15] L. Breiman, J. H. Friedman, R. A. Olshen, and C. J. Stone. Classification and regression trees. Routledge, 2017. +[16] S. Burer and D. Vandenbussche. A finite branch-and-bound algorithm for nonconvex quadratic programming via semidef- +inite relaxations. Mathematical Programming, 113(2):259–282, 2008. +[17] Q. Cappart, D. Ch´etelat, E. Khalil, A. Lodi, C. Morris, and P. Veliˇckovi´c. Combinatorial optimization and reasoning with +graph neural networks. arXiv preprint: 2102.09544, 2021. +[18] P. M. Castro. +Normalized multiparametric disaggregation: an efficient relaxation for mixed-integer bilinear problems. +Journal of Global Optimization, 64(4):765–784, 2016. +[19] F. Cengil, H. Nagarajan, R. Bent, S. Eksioglu, and B. Eksioglu. Learning to accelerate globally optimal solutions to the +AC optimal power flow problem. Electric Power Systems Research, 212:108275, 2022. +[20] A. Costa, P. Hansen, and L. Liberti. On the impact of symmetry-breaking constraints on spatial branch-and-bound for +circle packing in a square. Discrete Applied Mathematics, 161(1-2):96–106, 2013. +[21] D. De Wolf and Y. Smeers. +Generalized derivatives of the optimal value of a linear program with respect to matrix +coefficients. European Journal of Operational Research, 291(2):491–496, 2021. +[22] G. Di Liberto, S. Kadioglu, K. Leo, and Y. Malitsky. DASH: Dynamic approach for switching heuristics. European Journal +of Operational Research, 248(3):943–953, 2016. +[23] H. Drucker. Improving regressors using boosting techniques. In ICML, volume 97, pages 107–115. Citeseer, 1997. +[24] M. Etheve, Z. Al`es, C. Bissuel, O. Juan, and S. Kedad-Sidhoum. Reinforcement learning for variable selection in a branch +and bound algorithm. In International Conference on Integration of Constraint Programming, Artificial Intelligence, and +Operations Research, pages 176–185. Springer, 2020. +[25] R. M. Freund. Postoptimal analysis of a linear program under simultaneous changes in matrix coefficients. In Mathematical +Programming Essays in Honor of George B. Dantzig Part I, pages 1–13. Springer, 1985. +[26] Y. Freund and R. E. Schapire. +A decision-theoretic generalization of on-line learning and an application to boosting. +Journal of Computer and System Sciences, 55(1):119–139, 1997. +[27] M. Gasse, D. Ch´etelat, N. Ferroni, L. Charlin, and A. Lodi. Exact combinatorial optimization with graph convolutional +neural networks. Advances in Neural Information Processing Systems, 32, 2019. +[28] B. Ghaddar, I. G´omez-Casares, J. Gonz´alez-D´ıaz, B. Gonz´alez-Rodr´ıguez, B. Pateiro-L´opez, and S. Rodr´ıguez-Ballesteros. +Learning for spatial branching: An algorithm selection approach. arXiv preprint arXiv:2204.10834, 2022. +[29] B. Gonz´alez-Rodr´ıguez, R. Alvite-Paz´o, S. Alvite-Paz´o, B. Ghaddar, and J. Gonz´alez-D´ıaz. +Polynomial optimization: +Enhancing RLT relaxations with conic constraints. arXiv preprint arXiv:2208.05608, 2022. +[30] C. A. Haverly. 
Studies of the behavior of recursion for the pooling problem. ACM SIGMAP Bulletin, 25:19–28, 1978. +[31] H. He, H. Daume III, and J. M. Eisner. Learning to search in branch and bound algorithms. Advances in Neural Information +Processing Systems, 27:3293–3301, 2014. +[32] J. Im. Sensitivity analysis and robust optimization: A geometric approach for the special case of linear optimization. +Master’s thesis, University of Waterloo, 2018. +[33] R. Kannan. Algorithms, analysis and software for the global optimization of two-stage stochastic programs. PhD thesis, +Massachusetts Institute of Technology, 2018. +[34] R. Kannan and P. I. Barton. The cluster problem in constrained global optimization. Journal of Global Optimization, 69 +(3):629–676, 2017. +[35] R. Kannan and P. I. Barton. Convergence-order analysis of branch-and-bound algorithms for constrained problems. Journal +of Global Optimization, 71(4):753–813, 2018. +[36] E. Khalil, P. Le Bodic, L. Song, G. Nemhauser, and B. Dilkina. Learning to branch in mixed integer programming. In +Proceedings of the AAAI Conference on Artificial Intelligence, volume 30, 2016. +[37] J. Kim, J.-P. P. Richard, and M. Tawarmalani. Piecewise polyhedral relaxations of multilinear optimization. Optimization +Online, 2022. URL http://www.optimization-online.org/DB_HTML/2022/07/8974.html. +[38] S. Kolodziej, P. M. Castro, and I. E. Grossmann. +Global optimization of bilinear programs with a multiparametric +disaggregation technique. Journal of Global Optimization, 57(4):1039–1063, 2013. +[39] T. C. Koopmans and M. Beckmann. Assignment problems and the location of economic activities. Econometrica: Journal +of the Econometric Society, pages 53–76, 1957. +[40] J. Kotary, F. Fioretto, P. Van Hentenryck, and B. Wilder. End-to-end constrained optimization learning: A survey. arXiv +preprint: 2103.16378, 2021. +[41] M. Lee, N. Ma, G. Yu, and H. Dai. Accelerating generalized Benders decomposition for wireless resource allocation. IEEE +25 + +Transactions on Wireless Communications, 2020. +[42] X. Li, E. Armagan, A. Tomasgard, and P. I. Barton. Stochastic pooling problem for natural gas production network design +and operation under uncertainty. AIChE Journal, 57(8):2120–2135, 2011. +[43] Y. Lin and L. Schrage. The global solver in the LINDO API. Optimization Methods & Software, 24(4-5):657–668, 2009. +[44] J. Liu, N. Ploskas, and N. V. Sahinidis. Tuning BARON using derivative-free optimization algorithms. Journal of Global +Optimization, 74(4):611–637, 2019. +[45] A. Lodi and G. Zarpellon. On learning and branching: a survey. Top, 25(2):207–236, 2017. +[46] M. Lu, H. Nagarajan, R. Bent, S. D. Eksioglu, and S. J. Mason. Tight piecewise convex relaxations for global optimization +of optimal power flow. In 2018 Power Systems Computation Conference, pages 1–7. IEEE, 2018. +[47] J. Luedtke, C. d’Ambrosio, J. Linderoth, and J. Schweiger. Strong convex nonlinear relaxations of the pooling problem. +SIAM Journal on Optimization, 30(2):1582–1609, 2020. +[48] M. M. M¨akel¨a. Multiobjective proximal bundle method for nonconvex nonsmooth optimization: Fortran subroutine mpb- +ngc 2.0. url: http://napsu.karmitsa.fi/publications/pbncgc_report.pdf. Reports of the Department of Mathematical +Information Technology, Series B. Scientific Computing, B, 13, 2003. +[49] G. P. McCormick. Computability of global solutions to factorable nonconvex programs: Part I—convex underestimating +problems. Mathematical Programming, 10(1):147–175, 1976. +[50] R. Misener and C. A. Floudas. 
GloMIQO: Global mixed-integer quadratic optimizer. Journal of Global Optimization, 57 +(1):3–50, 2013. +[51] R. Misener and C. A. Floudas. ANTIGONE: algorithms for continuous/integer global optimization of nonlinear equations. +Journal of Global Optimization, 59(2):503–526, 2014. +[52] H. Nagarajan, M. Lu, E. Yamangil, and R. Bent. Tightening McCormick relaxations for nonlinear programs via dynamic +multivariate partitioning. In International conference on principles and practice of constraint programming, pages 369–387. +Springer, 2016. +[53] H. Nagarajan, M. Lu, S. Wang, R. Bent, and K. Sundar. +An adaptive, multivariate partitioning algorithm for global +optimization of nonconvex programs. Journal of Global Optimization, 74(4):639–675, 2019. +[54] V. Nair, S. Bartunov, F. Gimeno, I. von Glehn, P. Lichocki, I. Lobov, B. O’Donoghue, N. Sonnerat, C. Tjandraatmadja, +P. Wang, R. Addanki, T. Hapuarachchi, T. Keck, J. Keeling, P. Kohli, I. Ktena, Y. Li, O. Vinyals, and Y. Zwols. Solving +mixed integer programs using neural networks. arXiv preprint: 2012.13349, 2020. +[55] G. Nannicini, P. Belotti, J. Lee, J. Linderoth, F. Margot, and A. W¨achter. A probing algorithm for MINLP with failure +prediction by SVM. In International Conference on AI and OR Techniques in Constriant Programming for Combinatorial +Optimization Problems, pages 154–169. Springer, 2011. +[56] C. J. Nohra, A. U. Raghunathan, and N. Sahinidis. Spectral relaxations and branching strategies for global optimization +of mixed-integer quadratic programs. SIAM Journal on Optimization, 31(1):142–171, 2021. +[57] F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, +V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine +learning in Python. Journal of Machine Learning Research, 12:2825–2830, 2011. +[58] N. V. Sahinidis. BARON: A general purpose global optimization software package. Journal of global optimization, 8(2): +201–205, 1996. +[59] Y. Saif, A. Elkamel, and M. Pritzker. +Global optimization of reverse osmosis network for wastewater treatment and +minimization. Industrial & Engineering Chemistry Research, 47(9):3060–3070, 2008. +[60] H. D. Sherali and W. P. Adams. A reformulation-linearization technique for solving discrete and continuous nonconvex +problems, volume 31. Springer Science & Business Media, 2013. +[61] G. +Still. +Lectures +on +parametric +optimization: +An +introduction. +Optimization +Online, +2018. +URL +https://optimization-online.org/2018/04/6587/. +[62] K. Sundar, H. Nagarajan, J. Linderoth, S. Wang, and R. Bent. Piecewise polyhedral formulations for a multilinear term. +Operations Research Letters, 49(1):144–149, 2021. +[63] S. A. Vavasis. Quadratic programming is in NP. Information Processing Letters, 36(2):73–77, 1990. +[64] F. G. V´azquez, J.-J. R¨uckmann, O. Stein, and G. Still. Generalized semi-infinite programming: a tutorial. Journal of +Computational and Applied Mathematics, 217(2):394–419, 2008. +[65] D. S. Wicaksono and I. A. Karimi. Piecewise MILP under- and overestimators for global optimization of bilinear programs. +AIChE Journal, 54(4):991–1008, 2008. +[66] Y. Yang and P. I. Barton. Integrated crude selection and refinery optimization under uncertainty. AIChE journal, 62(4): +1038–1053, 2016. +[67] G. Zarpellon, J. Jo, A. Lodi, and Y. Bengio. Parameterizing branch-and-bound search trees to learn branching policies. 
+In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 3931–3939, 2021. +26 + diff --git a/_NAyT4oBgHgl3EQfdvce/content/tmp_files/load_file.txt b/_NAyT4oBgHgl3EQfdvce/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..d5052f6a6fce9809e7f75e3b44b0b6d94dc925bf --- /dev/null +++ b/_NAyT4oBgHgl3EQfdvce/content/tmp_files/load_file.txt @@ -0,0 +1,1916 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf,len=1915 +page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='00306v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='OC] 31 Dec 2022 Learning to Accelerate Partitioning Algorithms for the Global Optimization of Nonconvex Quadratically-Constrained Quadratic Programs Rohit Kannan1,2, Harsha Nagarajan2, and Deepjyoti Deka2 1Center for Nonlinear Studies (T-CNLS), Los Alamos National Laboratory, Los Alamos, NM, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' 2Applied Mathematics & Plasma Physics (T-5), Los Alamos National Laboratory, Los Alamos, NM, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' E-mail: {rohitk@alum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='mit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='edu, harsha@lanl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='gov, deepjyoti@lanl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='gov} Abstract We learn optimal instance-specific heuristics to accelerate partitioning algorithms for solving noncon- vex quadratically-constrained quadratic programs (QCQPs) to global optimality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Specifically, we propose the novel problem of strong partitioning to optimally partition the domains of variables participating in nonconvex terms within a QCQP without sacrificing guarantees of global optimality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' We then design a local optimization method for solving this challenging max-min strong partitioning problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Because solving this max-min problem to local optimality may still be time consuming, we propose to use machine learning (ML) to learn this strategy on homogeneous families of QCQPs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' We present a detailed computa- tional study on randomly generated families of QCQPs, including instances of the pooling problem, using the open-source global solver Alpine.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Our numerical experiments demonstrate that strong partitioning and its ML approximation significantly reduce Alpine’s solution time by factors of 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='5 − 16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='5 and 2 − 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='5 on average and by maximum factors of 15−700 and 10−200, respectively, over different QCQP families.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Key words: Quadratically-Constrained Quadratic Program, Piecewise McCormick Relaxations, Global Optimization, Machine Learning, Strong Partitioning, Sensitivity Analysis, Pooling Problem 1 Introduction Many real-world applications involve the repeated solution of the same high-level semantic quadratically- constrained quadratic program (QCQP) with slightly varying model parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Examples include the pooling problem with varying input qualities [50] and the cost-efficient operation of the power grid with varying loads and renewable sources [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' These hard optimization problems are typically solved using off-the-shelf global optimization software [8, 11, 43, 51, 53, 58] that do not exploit the shared problem structure—heuristics within these implementations are engineered to work well on average over a diverse set of instances and their performance may be sub-optimal for instances from a specific application [44].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Recent work [9, 45] has shown that tailoring branching decisions can significantly accelerate branch-and- bound (B&B) algorithms for mixed-integer linear programs (MILPs).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' In contrast, only a few papers (see Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='2) attempt to use machine learning (ML) to accelerate the guaranteed global solution of nonconvex nonlinear programs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' We investigate ML-based approaches for accelerating partitioning algorithms for the global minimization of nonconvex QCQPs, such as those implemented within the solver Alpine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Alpine [52, 53] is a Julia-based open-source solver for the global optimization of mixed-integer polynomial problems.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' It computes a sequence of upper and lower bounds on the optimal objective value of the nonconvex problem and terminates when these bounds converge to within a prescribed tolerance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' It determines feasible solutions and corresponding upper bounds by solving the nonconvex problem using a local solver.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' What distinguishes Alpine from most 1 global solvers [8, 11, 43, 51, 58] is an iterative, partitioning-based lower bounding algorithm that determines lower bounds on the optimal objective value using piecewise convex relaxations [53].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Alpine initializes its lower bounding algorithm by applying heuristics to select for partitioning a subset of the continuous variables participating in nonconvex terms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' At each iteration, it adaptively refines the partitions of the domains of the selected variables and updates the piecewise convex relaxations in its lower bounding formulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' It then solves a convex mixed-integer program (MIP) [46, 62] to determine a lower bound.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Alpine uses heuristics to specify the locations of partitioning points and continues to refine its variable partitions until the lower and upper bounds converge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Since the complexity of Alpine’s MIP-based lower bounding problem can grow significantly at each iteration, the choice of partitioning points in the initial iterations can have a huge impact on its overall performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Proposed approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' We propose to learn how to optimally partition the domains of variables partic- ipating in nonconvex terms within Alpine without sacrificing guarantees of global optimality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Similar to the concept of strong branching in B&B algorithms for MIPs [1, 8], we propose the novel concept of strong partitioning to choose Alpine’s partitioning points in the first iteration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' The key idea of strong partitioning is to determine a specified number of partitioning points per variable so that the resulting piecewise convex relaxation-based lower bound is maximized.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' If Alpine does not converge in its first iteration after strong partitioning is used, we revert to using its default partitioning strategy (see Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='1 for details) in the sub- sequent iterations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' We formulate strong partitioning as a max-min problem, where the outer-maximization chooses the partitioning points and the inner-minimization solves the piecewise relaxation-based lower bound- ing problem for a given partition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' We solve this max-min problem to local optimality by using generalized gradient information of the value function of the inner-minimization problem within a bundle solver for nonsmooth nonconvex optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Because each iteration of the bundle method requires the solution of a MIP, solving this max-min problem may be computationally expensive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Therefore, we propose to use ML to learn the strong partitioning strategy on families of homogeneous QCQPs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Numerical experiments on ran- domly generated QCQPs and instances of the pooling problem demonstrate that using strong partitioning to initialize Alpine’s variable partitions can significantly reduce its solution time by a factor of 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='5 − 16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='5 on average and by a maximum factor of 15 − 700 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' They also illustrate that an off-the-shelf ML model is able to learn the strong partitioning strategy approximately, reducing Alpine’s solution time by a factor of 2 − 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='5 on average and by a maximum factor of 10 − 200 over the same set of instances.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' This paper is organized as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Section 2 reviews related work on the use of ML to accelerate the guaranteed global solution of MILPs, nonconvex nonlinear programs (NLPs), and mixed-integer nonlinear programs (MINLPs).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Section 3 outlines the implementation of partitioning-based algorithms for nonconvex QCQPs within Alpine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Section 4 introduces strong partitioning and designs an algorithm for its solution with theoretical guarantees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Section 5 presents detailed computational results demonstrating the effectiveness of using strong partitioning and an off-the-shelf ML approximation within Alpine for randomly generated QCQPs, including instances of the pooling problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' We conclude with directions for future research in Section 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Notation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Let [n] := {1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=', n} and R+ denote the set of non-negative reals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' We write vi to denote the ith component of vector v = (v1, v2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' , vn) ∈ Rn, Mij to denote the (i, j)th component of matrix M, and |S|, int(S), and conv(S) to denote the cardinality, interior, and convex hull of a set S, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' 2 Related work Optimization solvers tune key algorithmic parameters by extensive testing on benchmark libraries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' However, they typically only consider a narrow family of efficiently computable heuristics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Moreover, they do not tailor these heuristics to each problem instance but seek a universal setting for good average solver performance.' 
Machine learning, on the other hand, can enable efficient approximations of better performing but expensive heuristics, potentially leading to significant computational gains for hard test instances. Several recent papers survey the burgeoning field of using ML to accelerate MILP and combinatorial optimization algorithms [9, 17, 40, 45]. In the next sections, we review related approaches on learning to branch for MILPs and learning to accelerate the guaranteed solution of (MI)NLPs.

2.1 Learning to branch for MILPs

Branch-and-bound and its variants form the backbone of modern MILP solvers. Besides heuristics for determining good feasible solutions, selecting the branching variable at each node of the B&B tree is probably the most impactful decision in terms of the run time of the algorithm [1]. Typically, a subset of the integer variables with fractional lower bounding solutions at a particular node are considered as the candidate branching variables for that node. The gold standard branching variable selection heuristic is full strong branching (FSB), which chooses as the branching variable the one that leads to the maximum product of improvements in the lower bounds of the two children nodes (assuming they are both feasible). FSB results in a 65% reduction in the number of nodes explored by the B&B tree (relative to the default branching strategy) on average over standard test instances; however, this comes with a 44% increase in the cost-per-node that is not tenable [1]. MILP solvers therefore tend to use computationally cheaper heuristic approximations of FSB such as reliability, pseudocost, or hybrid branching. Motivated by FSB's promise, most approaches for learning to branch for MILPs aim to develop a computationally efficient ML approximation that retains its attractive performance.
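To make the product rule above concrete, the following Python sketch (our own illustration, not taken from any MILP solver; the dictionary of child bounds stands in for the LPs that FSB actually re-solves for every candidate) scores each fractional candidate by the product of the lower-bound improvements of its two children and returns the best one.

    import math

    def fsb_score(node_bound, candidate_child_bounds, eps=1e-6):
        """Full strong branching rule: candidate_child_bounds maps a candidate
        variable index to (down_child_bound, up_child_bound), the lower bounds
        obtained by tentatively branching down/up and re-solving the node
        relaxation.  The candidate maximizing the product of improvements wins."""
        best_var, best_score = None, -math.inf
        for var, (down_bound, up_bound) in candidate_child_bounds.items():
            down_gain = max(down_bound - node_bound, 0.0)
            up_gain = max(up_bound - node_bound, 0.0)
            score = max(down_gain, eps) * max(up_gain, eps)  # eps guards zero gains
            if score > best_score:
                best_var, best_score = var, score
        return best_var

    # Toy node with bound 10.0: variable 3 yields children bounds (11.0, 10.5)
    # and variable 7 yields (10.2, 12.0); variable 3 wins (0.5 > 0.4).
    print(fsb_score(10.0, {3: (11.0, 10.5), 7: (10.2, 12.0)}))  # 3

The expensive part hidden in this sketch is, of course, producing the child bounds, which is exactly the cost-per-node increase quoted above.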
Alvarez et al. [4] propose several hand-crafted features to construct an ML approximation of FSB using Extremely Randomized Trees. Khalil et al. [36] propose an instance-specific on-the-fly ML approximation of FSB using Support Vector Machines (SVMs) and use the learned approximation for subsequent branching decisions in the same problem instance. Gasse et al. [27] and Nair et al. [54] design graph neural network (GNN) approximations of FSB that leverage the bipartite graph representation of MILPs and remove the need for feature engineering. Zarpellon et al. [67] seek to learn branching policies that generalize to heterogeneous MILPs by explicitly parameterizing the state of the B&B tree. A few works (see, e.g., [24, 31]) consider online and reinforcement learning approaches for making branching decisions. Some others [5, 22] combine existing heuristics to come up with better branching decisions.

2.2 Learning for (MI)NLPs

To the best of our knowledge, there are only a few approaches in the literature for accelerating the guaranteed global solution of nonconvex NLPs and MINLPs using ML. Baltean-Lugojan et al. [6] consider the global solution of nonconvex QCQPs using semi-definite programming (SDP) relaxations.
They use ML to construct good outer-approximations of these SDP relaxations to mitigate the computational burden of SDP solvers. They train a neural network to select cuts based on their sparsity and predicted impact on the objective, and show that their approach results in computationally cheap relaxations that can be effectively integrated into global solvers. Ghaddar et al. [28] consider branching variable selection for a B&B search tree that is embedded within the reformulation-linearization technique (RLT) for solving polynomial problems. They use ML to choose the "best branching strategy" from a portfolio of branching rules (cf. Di Liberto et al. [22]) based on violations of RLT-defining identities. They design several hand-crafted features and pick the branching strategy that optimizes a quantile regression forest-based approximation of their performance indicator. González-Rodríguez et al. [29] consider a portfolio of second-order cone and SDP constraints to strengthen the RLT formulation for polynomial problems and use ML to select constraints to add within a B&B framework. Bonami et al. [14] train classifiers to predict whether linearizing products of binary variables or of binary and bounded continuous variables may be computationally advantageous for solving MIQPs. Nannicini et al. [55] train an SVM classifier to decide if an expensive optimality-based bounds tightening (OBBT) routine should be used in lieu of a cheaper feasibility-based routine for nonconvex MINLPs. Cengil et al. [19] consider the AC optimal power flow problem and train a deep neural network to identify a small subset of lines and buses for which the reduced-cost OBBT routine is applied. Finally, Lee et al. [41] use classification and regression techniques to identify effective cuts for the generalized Benders decomposition master problem.

3 Partitioning-based bounds for QCQPs

Consider the nonconvex QCQP

    min_{x ∈ [0,1]^n}  x^T Q^0 x + (r^0)^T x    (1)
    s.t.  x^T Q^i x + (r^i)^T x ≤ b_i,  ∀i ∈ [m_I],

where r^k ∈ R^n, k ∈ {0} ∪ [m_I], and Q^k ∈ R^{n×n}, k ∈ {0} ∪ [m_I], are symmetric but not necessarily positive semi-definite, and b ∈ R^{m_I}. QCQPs with equality constraints and different variable bounds can be handled using simple transformations. Polynomial optimization problems may also be reformulated as QCQPs through the addition of variables and constraints. It is well known that nonconvex QCQPs are NP-hard [63]. QCQPs arise in several applications (see [50] for a detailed list) such as facility location [39], refinery optimization [33, 66], electric grid optimization [12], and circle packing [20]. By introducing variables and constraints, we can reformulate (1) into the following equivalent form:

    v* := min_{x ∈ [0,1]^n, w}  c^T x + d^T w    (QCQP)
    s.t.  Ax + Bw ≤ b,
          w_{ij} = x_i x_j,  ∀(i, j) ∈ B,
          w_{kk} = x_k^2,  ∀k ∈ Q,

for some vectors c and d, matrices A and B, and index sets B ⊂ {(i, j) ∈ [n]^2 : i ≠ j} and Q ⊂ [n] of (pairs of) variables participating in bilinear and univariate quadratic terms. Define the set F := {(x, w) : x ∈ [0,1]^n, Ax + Bw ≤ b} for convenience.
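As a rough illustration of how the lifted form is assembled (a minimal sketch under our own naming conventions, not Alpine's implementation), the snippet below scans the coefficient matrices Q^k and collects the index sets B and Q that receive lifted w variables.

    import numpy as np

    def collect_nonconvex_terms(Q_matrices, tol=1e-12):
        """Return the index sets B (bilinear pairs i < j) and Q (squared terms)
        appearing in any of the symmetric matrices Q^0, ..., Q^{m_I}."""
        n = Q_matrices[0].shape[0]
        bilinear, quadratic = set(), set()
        for Qk in Q_matrices:
            for i in range(n):
                for j in range(i, n):
                    if abs(Qk[i, j]) > tol:
                        if i == j:
                            quadratic.add(i)
                        else:
                            bilinear.add((i, j))
        return sorted(bilinear), sorted(quadratic)

    # x0*x1 in the objective and x1**2 in a constraint: B = [(0, 1)], Q = [1],
    # so the lifted model carries w_{01} and w_{11} alongside x.
    Q0 = np.array([[0.0, 0.5], [0.5, 0.0]])
    Q1 = np.array([[0.0, 0.0], [0.0, 1.0]])
    print(collect_nonconvex_terms([Q0, Q1]))  # ([(0, 1)], [1])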
We assume (QCQP) is feasible for simplicity. One of the earliest approaches for constructing lower bounds on the optimal value of (QCQP) uses termwise McCormick relaxations [2, 49] to yield the following lower bounding problem:

    min_{(x,w) ∈ F}  c^T x + d^T w    (2)
    s.t.  (x_i, x_j, w_{ij}) ∈ M^B_{ij},  ∀(i, j) ∈ B,
          (x_k, w_{kk}) ∈ M^Q_k,  ∀k ∈ Q,

where the bilinear and quadratic equality constraints in (QCQP) have been replaced with the following valid convex relaxations on x ∈ [0,1]^n for each (i, j) ∈ B and k ∈ Q:

    M^B_{ij} := {(x_i, x_j, w_{ij}) : 0 ≤ w_{ij} ≤ x_i, x_i + x_j − 1 ≤ w_{ij} ≤ x_j},
    M^Q_k := {(x_k, w_{kk}) : x_k^2 ≤ w_{kk} ≤ x_k}.

Problem (2) can be used within a spatial B&B framework for solving (QCQP) to global optimality. Several papers (see, e.g., [7, 10, 13, 16, 56, 59, 60, 65]) improve upon the termwise McCormick bound, usually at an increase in the computational cost but with the goal of reducing the overall time taken by the B&B algorithm to converge. In this work, we consider the so-called piecewise McCormick relaxation approach [10, 18, 38, 53, 59, 65] for strengthening the termwise McCormick relaxations.
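Before describing the piecewise scheme, a small self-contained check of the termwise envelopes above may help; this is purely illustrative code of ours, with the unit-box McCormick inequalities hard-coded.

    def in_mccormick_bilinear(xi, xj, wij, tol=1e-9):
        """Membership in M^B_ij on the unit box: 0 <= w_ij, w_ij >= x_i + x_j - 1,
        w_ij <= x_i, and w_ij <= x_j."""
        return (wij >= -tol and wij >= xi + xj - 1.0 - tol
                and wij <= xi + tol and wij <= xj + tol)

    def in_mccormick_square(xk, wkk, tol=1e-9):
        """Membership in M^Q_k on [0, 1]: x_k**2 <= w_kk <= x_k."""
        return xk * xk - tol <= wkk <= xk + tol

    # Both points below satisfy the envelopes although neither matches the true
    # product/square (0.25); this slack is the relaxation gap that the piecewise
    # McCormick construction of the next paragraphs shrinks.
    print(in_mccormick_bilinear(0.5, 0.5, 0.1), in_mccormick_square(0.5, 0.3))  # True True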
Piecewise McCormick relaxations begin by partitioning the domains of variables participating in nonconvex terms into sub-intervals. Assume for simplicity that we wish to partition the domain of each x_i into d + 1 sub-intervals with d ≥ 1 (general partitioning schemes can be handled similarly). For each i ∈ [n], let

    P_i := [p^i_0, p^i_1, p^i_2, ..., p^i_d, p^i_{d+1}],  with  0 =: p^i_0 ≤ p^i_1 ≤ p^i_2 ≤ ··· ≤ p^i_d ≤ p^i_{d+1} := 1,

denote the array of d + 2 partitioning points for variable x_i, including the original variable bounds 0 and 1.

Figure 1: The left and middle plots illustrate the lower and upper parts, respectively, of the piecewise McCormick relaxation for the bilinear term w_{12} = x_1 x_2 on the domain x_1, x_2 ∈ [−1, 1] (domain changed from [0, 1] for better illustration) with partitions P_1 = P_2 = {−1, 0, 1}. The right plot illustrates the piecewise McCormick relaxation for the quadratic term w_{11} = x_1^2 on the domain x_1 ∈ [0, 1] (the lower part coincides with the red quadratic curve) with the partition P_1 = {0, 0.5, 1}.

Given partitions P_i, i ∈ [n], the piecewise McCormick relaxation-based lower bounding problem to (QCQP) can be abstractly written as

    min_{(x,w) ∈ F}  c^T x + d^T w    (3)
    s.t.  (x_i, x_j, w_{ij}) ∈ PMR^B_{ij}(P_i, P_j),  ∀(i, j) ∈ B,
          (x_k, w_{kk}) ∈ PMR^Q_k(P_k),  ∀k ∈ Q,

where PMR^B_{ij}(P_i, P_j) and PMR^Q_k(P_k) denote the feasible regions corresponding to the piecewise McCormick relaxations of the bilinear equation w_{ij} = x_i x_j and the quadratic equation w_{kk} = x_k^2, respectively. While there are several ways of formulating the piecewise McCormick relaxations, Alpine uses the so-called "convex combination" or "lambda" formulation that we detail below (see [37] for enhancements in the multilinear setting). The piecewise McCormick relaxation for the bilinear constraint w_{ij} = x_i x_j can be represented as follows [62]:
    PMR^B_{ij}(P_i, P_j) := {(x_i, x_j, w_{ij}) : ∃ λ^{ij} ∈ R^{(d+2)^2}_+, y^i ∈ {0,1}^{d+1}, y^j ∈ {0,1}^{d+1}
                             s.t. (x_i, x_j, w_{ij}, λ^{ij}, y^i, y^j) satisfies (4a)–(4d)},

where

    x_i = Σ_{k,l=0}^{d+1} λ^{ij}_{k(d+2)+l+1} p^i_l,   x_j = Σ_{k,l=0}^{d+1} λ^{ij}_{k(d+2)+l+1} p^j_k,   w_{ij} = Σ_{k,l=0}^{d+1} λ^{ij}_{k(d+2)+l+1} p^i_l p^j_k,    (4a)
    Σ_{k=1}^{d+1} y^i_k = 1,   Σ_{k=1}^{d+1} y^j_k = 1,   Σ_{k=1}^{(d+2)^2} λ^{ij}_k = 1,    (4b)
    Σ_{k=0}^{d+1} λ^{ij}_{k(d+2)+1} ≤ y^i_1,   Σ_{k=1}^{d+2} λ^{ij}_{k(d+2)} ≤ y^i_{d+1},   Σ_{k=0}^{d+1} λ^{ij}_{k(d+2)+l+1} ≤ y^i_l + y^i_{l+1},  ∀l ∈ [d],    (4c)
    Σ_{k=1}^{d+2} λ^{ij}_k ≤ y^j_1,   Σ_{k=1}^{d+2} λ^{ij}_{(d+2)^2−k} ≤ y^j_{d+1},   Σ_{k=1}^{d+2} λ^{ij}_{l(d+2)+k} ≤ y^j_l + y^j_{l+1},  ∀l ∈ [d].    (4d)

Note that only equations (4a) depend on the partitions P_i and P_j of x_i and x_j, respectively, which are parameters in these equations. Additionally, the binary vectors y^i and y^j denote the active partition of x_i and x_j; these variables may be reused in the piecewise McCormick relaxations of other nonconvex terms involving x_i or x_j.

The piecewise McCormick relaxation for the quadratic constraint w_{kk} = x_k^2 can be represented as follows [46, 53]:

    PMR^Q_k(P_k) := {(x_k, w_{kk}) : ∃ λ^k ∈ R^{d+2}_+, y^k ∈ {0,1}^{d+1} s.t. (x_k, w_{kk}, λ^k, y^k) satisfies (5a)–(5c)},

where

    x_k = Σ_{l=0}^{d+1} λ^k_{l+1} p^k_l,   w_{kk} ≤ Σ_{l=0}^{d+1} λ^k_{l+1} (p^k_l)^2,   Σ_{l=1}^{d+1} y^k_l p^k_{l−1} ≤ x_k ≤ Σ_{l=2}^{d+2} y^k_{l−1} p^k_{l−1},    (5a)
    Σ_{l=1}^{d+1} y^k_l = 1,   Σ_{l=1}^{d+2} λ^k_l = 1,   w_{kk} ≥ x_k^2,    (5b)
    λ^k_1 ≤ y^k_1,   λ^k_{d+2} ≤ y^k_{d+1},   λ^k_{l+1} ≤ y^k_l + y^k_{l+1},  ∀l ∈ [d].    (5c)

Note that only equations (5a) depend on the partition P_k of x_k. Additionally, equations (5b) involve convex quadratic functions of x_k. Figure 1 illustrates the piecewise McCormick relaxations for a bilinear and a univariate quadratic term. Using the above representations of PMR^B_{ij} and PMR^Q_k, we obtain the following extended convex MIP formulation for the piecewise McCormick relaxation-based lower bound to (QCQP):

    min_{(x,w) ∈ F, λ ≥ 0, y ∈ Y}  c^T x + d^T w    (PMR)
    s.t.  (x_i, x_j, w_{ij}, λ^{ij}, y^i, y^j) satisfies (4a)–(4d),  ∀(i, j) ∈ B,
          (x_k, w_{kk}, λ^k, y^k) satisfies (5a)–(5c),  ∀k ∈ Q,

where Y := {y ∈ {0,1}^{n×(d+1)} : Σ_{l=1}^{d+1} y^i_l = 1, ∀i ∈ [n]} is a special-ordered set of type 1, the variables λ comprise λ^{ij}, (i, j) ∈ B, and λ^k, k ∈ Q, and the variables y comprise y^k, k ∈ {i : ∃j ∈ [n] s.t. (i, j) ∈ B} ∪ Q.
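To convey what the quadratic-term relaxation PMR^Q_k(P_k) amounts to geometrically, the sketch below (our own illustration; it evaluates the envelope rather than building the lambda/SOS1 model that Alpine hands to a MIP solver) returns, for a given partition and point, the lower bound x_k^2 and the secant-chord upper bound over the active sub-interval.

    import bisect

    def pwl_square_envelope(x, points):
        """Envelope on w = x**2 induced by PMR^Q_k with partition `points`
        (sorted, first entry 0, last entry 1): lower bound is x**2, upper bound
        is the secant of x**2 over the sub-interval containing x."""
        j = min(max(bisect.bisect_right(points, x) - 1, 0), len(points) - 2)
        lo, hi = points[j], points[j + 1]
        secant = (lo + hi) * x - lo * hi   # chord through (lo, lo^2) and (hi, hi^2)
        return x * x, secant

    # With the partition {0, 0.5, 1} of Figure 1, the gap at x = 0.25 shrinks
    # from 0.1875 (termwise, single interval) to 0.0625.
    print(pwl_square_envelope(0.25, [0.0, 0.5, 1.0]))  # (0.0625, 0.125)
    print(pwl_square_envelope(0.25, [0.0, 1.0]))       # (0.0625, 0.25)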
Problem (PMR) is a convex mixed-integer QCQP (MIQCQP). We further relax (PMR) by outer-approximating the convex quadratic terms in equation (5b) to obtain the following MILP relaxation. Note that for purely bilinear problems (i.e., |Q| = 0), problem (PMR) is already an MILP.

    v(P) := min_{(x,w) ∈ F, λ ≥ 0, y ∈ Y}  c^T x + d^T w    (PMR-OA)
    s.t.  (x_i, x_j, w_{ij}, λ^{ij}, y^i, y^j) satisfies (4a)–(4d),  ∀(i, j) ∈ B,
          (x_k, w_{kk}, λ^k, y^k) satisfies (5a) and (5c),  ∀k ∈ Q,
          Σ_{l=1}^{d+2} λ^k_l = 1,   w_{kk} ≥ 2 α^k_j x_k − (α^k_j)^2,  ∀j ∈ J, k ∈ Q.    (6)

We explicitly indicate the dependence of the piecewise McCormick lower bound v on the d × n matrix of partitioning points P := (p^1, p^2, ..., p^n), excluding the bounds 0 and 1. Constraints (6) outer-approximate the quadratic inequalities w_{kk} ≥ x_k^2 in equation (5b) at the points {α^k_j}_{j ∈ J} ⊂ [0, 1] (we assume both 0 and 1 are elements of {α^k_j}). We only use the outer-approximation (PMR-OA) for strong partitioning and revert to solving problem (PMR) while computing lower bounds within Alpine. In preparation for Section 4, we recast (PMR-OA) in the following abstract form for suitably defined vectors ¯b and ¯c, matrix ¯B, matrix-valued function M with co-domain R^{n_r × n_c}, and variables z (which include x, w, λ, and slack variables):

    v(P) := min_{y ∈ Y} v(P, y),    (7)
    v(P, y) := min_{z ≥ 0} ¯c^T z   s.t.   M(P, y) z = ¯B y + ¯b.    (8)

We omit in problem (8) the right-hand side constraints in equation (5a) for simplicity because they only strengthen the LP relaxation of (PMR-OA) and are redundant for the piecewise McCormick relaxations PMR^Q_k. Note that for any y ∈ Y, at most four of the λ^{ij} variables in the formulation of each PMR^B_{ij} and at most two of the λ^k variables in the formulation of each PMR^Q_k may be nonzero. Consequently, for each y ∈ Y, we eliminate the variables and equations corresponding to the λ variables that are fixed to zero and let M(P, y) denote the coefficient matrix of the remaining equations (the coefficients of the matrix M(P, y) themselves do not depend on y).

3.1 Alpine's partitioning strategy

Alpine begins by identifying the bilinear and univariate quadratic terms that occur in the input problem. Let NC denote the subset of the variables participating in nonconvex terms. Alpine solves (QCQP) to local optimality to try and determine a good initial feasible solution and uses heuristics to select for partitioning a subset of variables from NC. It then uses bounds tightening techniques to try and tighten the bounds on variables in NC using termwise McCormick relaxations and the feasible solution found during presolve. Instead of using Alpine's heuristic for selecting the partitioning variables, we choose to partition the domains of all variables in NC. This is because partitioning only a subset of variables in NC may cause Alpine to suffer from an analogue of the so-called cluster problem in reduced-space global optimization [34, 35], potentially resulting in a significant increase in its number of iterations for convergence. At Alpine's core is an adaptive partitioning approach for constructing piecewise McCormick relaxation-based lower bounds.
This adaptive strategy is motivated by the fact that uniformly partitioning variable domains creates many partitions that do not contribute significantly to improving the lower bound. Assume for simplicity that NC = {x_1, ..., x_n}, that Alpine partitions the domains of all n variables, and that its bounds tightening routines are deactivated. Alpine adds (up to) two partitioning points per variable at each iteration around a nominal point x̂ ∈ [0,1]^n. It specifies the following partitions {P̂^1_i}_{i ∈ [n]} in its first iteration:

    P̂^1_i := [0, max{0, x̂_i − 1/∆}, min{1, x̂_i + 1/∆}, 1],  ∀i ∈ [n],

where ∆ ≥ 4 is a user-specified partitioning parameter with a default value of 10. The point x̂ is set to the feasible local solution from presolve if one is found, or to a solution to the termwise McCormick relaxation (2) otherwise. Note that if x̂_i − ∆^{−1} ≤ 0, then Alpine does not add the corresponding point to the partition P̂^1_i of x_i in practice (a similar comment holds if x̂_i + ∆^{−1} ≥ 1). The parameter ∆ is a dimensionless scaling factor for the size of the partition constructed around x̂; larger values of ∆ result in a narrower partition around x̂. This choice of initial partitions is motivated by two factors: (i) local solvers are often able to find high-quality feasible solutions during presolve, and (ii) partitioning methods (lower bounding methods in general) inevitably have to explore the regions around global minimizers for the lower bound to converge.
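A literal transcription of this first-iteration rule (our own Python sketch of the formula above, not Alpine's Julia source) reads:

    def initial_partition(x_hat, delta=10.0):
        """First-iteration partitions [0, max(0, x_i - 1/delta),
        min(1, x_i + 1/delta), 1], dropping points that collapse onto the bounds."""
        assert delta >= 4.0
        partitions = []
        for xi in x_hat:
            pts = [0.0, xi - 1.0 / delta, xi + 1.0 / delta, 1.0]
            pts = sorted({min(max(p, 0.0), 1.0) for p in pts})  # clip to [0,1], dedupe
            partitions.append(pts)
        return partitions

    # For x_hat = (0.03, 0.5) and delta = 10, the first variable keeps a single
    # interior point because 0.03 - 0.1 < 0, matching the remark above.
    print(initial_partition([0.03, 0.5]))
    # [[0.0, 0.13, 1.0], [0.0, 0.4, 0.6, 1.0]] (up to floating-point round-off)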
At subsequent iterations k > 1, Alpine refines the active partition of each partitioned variable. Suppose Alpine's partition for variable x_i at iteration k − 1 is P̂^{k−1}_i = [p̂^i_0, p̂^i_1, ..., p̂^i_{2k−2}, p̂^i_{2k−1}], where 0 =: p̂^i_0 ≤ p̂^i_1 ≤ ··· ≤ p̂^i_{2k−2} ≤ p̂^i_{2k−1} := 1. Let x̄^{k−1} denote the x-component of a solution to the piecewise McCormick relaxation-based lower bounding problem (PMR) at iteration k − 1 with the above variable partitions. We say that the jth partition [p̂^i_{j−1}, p̂^i_j] of variable x_i is active at iteration k − 1 (relative to the solution x̄^{k−1}) if p̂^i_{j−1} ≤ x̄^{k−1}_i ≤ p̂^i_j, or (equivalently) if there exists an optimal solution to (PMR) such that y^i_j = 1. Let A(i, k − 1) denote the index of an active partition of variable x_i at iteration k − 1. At iteration k, Alpine refines the active partition [p̂^i_{A(i,k−1)−1}, p̂^i_{A(i,k−1)}] of each partitioned variable x_i, i ∈ [n], around the lower bounding solution x̄^{k−1}_i as follows:

    P̂^k_i := [p̂^i_0, ..., p̂^i_{A(i,k−1)−1},
               max{p̂^i_{A(i,k−1)−1}, x̄^{k−1}_i − width(A(i, k−1))/∆},
               min{p̂^i_{A(i,k−1)}, x̄^{k−1}_i + width(A(i, k−1))/∆},
               p̂^i_{A(i,k−1)}, ..., p̂^i_{2k−1}],

where width(A(i, k−1)) := p̂^i_{A(i,k−1)} − p̂^i_{A(i,k−1)−1} is the width of the active partition of x_i at iteration k − 1. The above setting for the partition P̂^k_i can be viewed as a generalization of the setting for the partition P̂^1_i at Alpine's first iteration.
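The refinement step admits an equally direct transcription; again, this is only our sketch of the update formula, with the active index recovered from the lower bounding solution rather than from the binary variables y.

    import bisect

    def refine_active_partition(points, x_bar, delta=10.0):
        """Insert up to two points around x_bar inside its active sub-interval
        [p_{A-1}, p_A], following the refinement rule described above."""
        a = min(max(bisect.bisect_right(points, x_bar), 1), len(points) - 1)
        lo, hi = points[a - 1], points[a]
        width = hi - lo
        new_pts = {max(lo, x_bar - width / delta), min(hi, x_bar + width / delta)}
        return sorted(set(points) | new_pts)

    # Refining [0, 0.4, 0.6, 1] around x_bar = 0.45 (active piece [0.4, 0.6]):
    print(refine_active_partition([0.0, 0.4, 0.6, 1.0], 0.45))
    # [0.0, 0.4, 0.43, 0.47, 0.6, 1.0] (up to floating-point round-off)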
A motivation for adding partitioning points around the solution x̄^{k−1} stems from the fact that the piecewise McCormick relaxations need to be refined around this (infeasible) solution in order to be able to exclude it from the feasible region of (PMR) at iteration k. This heuristic partitioning strategy was chosen because it empirically performs well on numerous test instances, particularly for instances of the pooling problem [53]. Alpine continues to refine its variable partitions until its upper and lower bounds on the optimal objective value of (QCQP) converge to within a specified tolerance.

4 Strong partitioning for nonconvex QCQPs

The choice of partitioning points in the initial iterations can greatly impact Alpine's lower bounds, number of iterations for convergence, and overall solution time. While there are some motivations for Alpine's default partitioning strategy, it is still ad hoc for a few reasons: it uses the same parameter ∆ to partition the domains of all variables, and it only considers symmetric partitions around the reference point x̂. The quality of Alpine's initial partitions also depends on the quality of the feasible solution determined during presolve, with sub-optimal or infeasible presolve solutions potentially leading to sub-optimal initial partitions and slow convergence overall. Hence, we propose strong partitioning (SP) to address the above limitations of Alpine's partitioning strategy.

The concept of strong partitioning is akin to strong branching in B&B algorithms for MILPs. Strong branching for MILPs only chooses the branching variable (a discrete choice) at a node to maximize some function of the lower bound improvements at its two children nodes. Strong partitioning, on the other hand, chooses partitioning points for each partitioned variable (continuous choices within the variable domains) such that the resulting piecewise McCormick relaxation lower bound is maximized.
It can be formulated as the following max-min problem:

    P* ∈ arg max_{P ∈ P} v(P),    (SP)

where v(P) is the value function of (PMR-OA) and the set P is defined as

    P := {P ∈ [0,1]^{d×n} : 0 ≤ p^i_1 ≤ p^i_2 ≤ ··· ≤ p^i_d ≤ 1, ∀i ∈ [n]}.

The strong partitioning problem (SP) is challenging to solve even to local optimality because the inner minimization problem (PMR-OA) includes binary decisions and its feasible region depends on P (the variables of the outer maximization). While (SP) can be formulated as a generalized semi-infinite program [64], state-of-the-art global optimization algorithms for this problem class do not scale even for moderate problem dimensions. Therefore, we design a local optimization method for (SP) with the hope of determining partitioning points P̄ ∈ P that yield a tight lower bound v(P̄). We use generalized gradients of the value function of the inner minimization (PMR-OA) within a bundle solver for nonsmooth nonconvex optimization to solve problem (SP) to local optimality. Although the value function of an MILP might be discontinuous in general, (PMR-OA) possesses special structure because (outer-approximations of) piecewise McCormick relaxations are nonconvex piecewise-linear continuous functions (cf. Figure 1), which allows for the computation of sensitivity information in this setting. The bundle solver that we use, MPBNGC [48], requires function and generalized gradient evaluations at points P ∈ P during the course of its algorithm. Each function evaluation v(P) requires the solution of the MILP (PMR-OA). Under suitable assumptions, a generalized gradient ∂_P v(P) can be obtained by fixing y to an optimal y solution of (PMR-OA) and computing a generalized gradient of the resulting LP (8) using parametric sensitivity theory [21]. We formalize these details in the next section.
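The function/gradient oracle that such a bundle method needs can be organized as in the following structural sketch. It is not MPBNGC's actual interface: solve_pmr_oa_milp, solve_lp_with_y_fixed, and dM_dP are placeholders for the user's MILP/LP solver hooks and for the derivative of M with respect to the partition entries, and the gradient assembly simply transcribes the sensitivity formula stated in Theorem 3 below.

    import numpy as np

    def sp_oracle(P, solve_pmr_oa_milp, solve_lp_with_y_fixed, dM_dP):
        """Evaluate v(P) and one generalized-gradient element for problem (SP).

        solve_pmr_oa_milp(P)        -> (v, y_star)       value and binary part of (PMR-OA)
        solve_lp_with_y_fixed(P, y) -> (z_star, pi_star) primal/dual solution of the LP (8)
        dM_dP(P, y)                 -> D with D[k, l, i, j] = dM_kl / dp^i_j
        """
        v, y_star = solve_pmr_oa_milp(P)
        z_star, pi_star = solve_lp_with_y_fixed(P, y_star)
        D = dM_dP(P, y_star)                              # shape (n_r, n_c, n, d)
        grad = -np.einsum("k,l,klij->ij", pi_star, z_star, D)
        return v, grad

    def negated_oracle(P, *hooks):
        """Bundle codes are typically written for minimization, so the
        maximization in (SP) is driven by passing -v(P) and its negated gradient."""
        v, grad = sp_oracle(P, *hooks)
        return -v, -grad

Keeping the iterates inside the ordered set P is left to the bundle solver's own constraint-handling mechanism and is not shown here.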
Before we proceed, we include the convergence guarantees of MPBNGC [48] below for the sake of completeness.

Definition 1. Let Z ⊂ R^N be open. A locally Lipschitz function f : Z → R is said to be weakly semismooth if the directional derivative f′(z, d) = lim_{t↓0} [f(z + td) − f(z)]/t exists for all z ∈ Z, d ∈ R^N, and f′(z, d) = lim_{t↓0} ξ(z + td)^T d for ξ(z + td) ∈ ∂f(z + td).

Definition 2. Let f : R^N → R and g : R^N → R^M be locally Lipschitz continuous. Consider the problem min_{z : g(z) ≤ 0} f(z). A feasible point z* is said to be substationary if there exist multipliers λ ≥ 0 and µ ∈ R^M_+, with (λ, µ) ≠ (0, 0), such that

    0 ∈ λ ∂f(z*) + Σ_{j=1}^{M} µ_j ∂g_j(z*),   µ_j g_j(z*) = 0, ∀j ∈ [M].

Theorem 1. Suppose the function v is weakly semismooth. Then MPBNGC either terminates finitely with a substationary point to (SP), or any accumulation point of a sequence of MPBNGC solutions is a substationary point to (SP).

Proof. See Theorem 9 of [48].

The following example shows that the value function v may be nonsmooth.

Example 1. Consider the following instance of the QCQP (1):

    min_{x ∈ [0,1]} x   s.t.   x^2 ≥ (0.4)^2.
Clearly, the optimal solution is x* = 0.4 with optimal value v* = 0.4. Suppose we wish to partition the domain of x into two sub-intervals (d = 1). Let P = [0, p, 1] denote the partition of x with 0 ≤ p ≤ 1. After some algebraic manipulation, the outer-approximation problem (PMR-OA) can be reformulated as

    v(p) = min_{x ∈ [0,1]} x   s.t.   w ≥ (0.4)^2,   w ≤ max{px, (1 + p)x − p},   w ≥ 2 α_j x − α_j^2, ∀j ∈ J,

where {α_j}_{j ∈ J} ⊂ [0, 1] and we write v(p) to indicate the dependence on the partitioning point p. We can derive the piecewise McCormick lower bound to be

    v(p) = (0.16 + p)/(1 + p)  if 0 ≤ p ≤ 0.4,   and   v(p) = 0.16/p  if 0.4 < p ≤ 1,

which shows that v is continuous and piecewise differentiable, but not differentiable, at p = 0.4.
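The closed-form bound above is easy to probe numerically. The following check (added by us purely for illustration) evaluates both branches and the one-sided difference quotients at p = 0.4, which exposes the kink.

    def v_example(p):
        """Piecewise McCormick lower bound of Example 1."""
        return (0.16 + p) / (1.0 + p) if p <= 0.4 else 0.16 / p

    h = 1e-6
    left_slope = (v_example(0.4) - v_example(0.4 - h)) / h   # ~  0.4286 = 0.84 / 1.4**2
    right_slope = (v_example(0.4 + h) - v_example(0.4)) / h  # ~ -1.0    = -0.16 / 0.4**2
    print(v_example(0.4), left_slope, right_slope)           # value 0.4, differing slopes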
4.1 Computing generalized gradients of v

We begin with the following useful result. Note that the assumption that the y solution of (7) is unique can be verified by adding a "no-good cut" and re-solving (7) to check whether the second-best solution for y has a strictly greater objective than v(P).

Lemma 2. Suppose problem (7) has a unique y solution y* ∈ Y at P ∈ P and v(·, y*) is continuous at P. Then v(P̃) = v(P̃, y*) for all P̃ ∈ P in a neighborhood of P.

Proof. Because y* is the unique y solution to (7) at P ∈ P, we have v(P, y*) < v(P, y) for all y ∈ Y \ {y*}. To see that v(·) ≡ v(·, y*) in a neighborhood of P, we show that the value function v(·, y) is lower semicontinuous on P for each y ∈ Y. The stated result then holds since v(·, y*) is assumed to be continuous at P. The set-valued mapping P ∈ P ↦ {z ≥ 0 : M(P, y)z = ¯By + ¯b} is locally compact for each y ∈ Y by virtue of the continuity of the mapping M(·, y) and the finite bounds on all of the variables in problem (PMR-OA). Lemma 5.3 of Still [61] then implies that v(·, y) is lower semicontinuous on P for each y ∈ Y.

The next result characterizes the gradient of v in the non-degenerate case.

Theorem 3. Suppose P ∈ P and problem (7) has a unique y solution y* ∈ Y. Consider the LP (8) with y fixed to y*. If this LP has a unique primal solution z* and a unique dual solution π*, then

    ∂v/∂p^i_j (P) = ∂v/∂p^i_j (P, y*) = Σ_{k=1}^{n_r} Σ_{l=1}^{n_c} −π*_k z*_l ∂M_{kl}/∂p^i_j (P, y*),   ∀i ∈ [n], j ∈ [d].
Proof. Lemma 2 implies v(·) ≡ v(·, y∗) in a neighborhood of P provided v(·, y∗) is continuous at P. Theorem 1 of Freund [25] (cf. Proposition 4.1 of [21]) and the fact that the function M(·, y∗) is continuously differentiable on 𝒫 together imply that v(·, y∗) is continuously differentiable at P and that the stated equalities hold.

Next, we derive a formula for the generalized gradient ∂_P v(P) when the assumption that the LP (8) is non-degenerate fails to hold.

Theorem 4. Suppose P ∈ 𝒫 and problem (7) has a unique y solution y∗ ∈ Y. Consider the LP (8) with y fixed to y∗. Suppose v(·, y∗) is finite and locally Lipschitz in a neighborhood of P. Then

\[ \partial_P v(P) = \partial_P v(P, y^*) = \mathrm{conv}\left\{ \sum_{k=1}^{n_r} \sum_{l=1}^{n_c} -\pi^*_k z^*_l \, \frac{\partial M_{kl}}{\partial P}(P, y^*) \; : \; (z^*, \pi^*) \text{ is a primal-dual optimal pair for (8)} \right\}. \]

Proof. Lemma 2 implies v(·) ≡ v(·, y∗) in a neighborhood of P. The stated equalities hold by mirroring the proof of Theorem 5.1 of De Wolf and Smeers [21] and noting that the function M(·, y∗) is continuously differentiable on 𝒫.
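To build intuition for these formulas, the Python sketch below checks the non-degenerate case (Theorem 3) on a toy parametric LP of the form min{cᵀz : M(p)z = b, z ≥ 0}; the data are illustrative and are not taken from the paper. The dual multipliers returned by scipy play the role of π∗, and the formula is compared against a central finite difference of the value function.

import numpy as np
from scipy.optimize import linprog

def solve_lp(p):
    # Toy LP: min z1 + 3 z2  s.t.  M(p) z = b,  z >= 0, with M(p) = [[p, 1]] and b = [1].
    c = np.array([1.0, 3.0])
    M = np.array([[p, 1.0]])
    b = np.array([1.0])
    return linprog(c, A_eq=M, b_eq=b, bounds=[(0, None), (0, None)], method="highs")

p = 0.5
res = solve_lp(p)
z_star = res.x                      # unique primal solution, here (2, 0)
pi_star = res.eqlin.marginals       # unique dual solution (sensitivity of the value to b)
dM_dp = np.array([[1.0, 0.0]])      # entrywise derivative of M(p) with respect to p

# Theorem 3's formula: dv/dp = sum_{k,l} -pi*_k z*_l dM_kl/dp.
grad = float(np.sum(-np.outer(pi_star, z_star) * dM_dp))

eps = 1e-6
fd = (solve_lp(p + eps).fun - solve_lp(p - eps).fun) / (2 * eps)
print(grad, fd)                     # both approximately -4.0, since v(p) = 1/p near p = 0.5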
De Wolf and Smeers [21] (see Assumption 5.1) and Im [32] argue that the following assumption ensures v(·, y∗) is locally Lipschitz in a neighborhood of P ∈ 𝒫.

Lemma 5. Suppose P ∈ 𝒫 and ȳ ∈ Y. Consider the LP (8) with y fixed to ȳ. If the matrix M(P, ȳ) has full row rank and B̄ȳ + b̄ ∈ int({M(P, ȳ)z : z ≥ 0}), then v(·, ȳ) is finite and locally Lipschitz in a neighborhood of P.

Proof. See Proposition 5.3 of [21] and pages 73 to 76 of [32].

We now verify that the full rank assumption in Lemma 5 holds in general.

Lemma 6. The matrix M(P, y) has full row rank for all P ∈ int(𝒫) and y ∈ Y.

Proof. Fix y ∈ Y. Since P ∈ int(𝒫), we have 0 < p^i_1 < p^i_2 < · · · < p^i_d < 1 for each i ∈ [n]. We show that for each (i, j) ∈ B and k ∈ Q, the equality constraints in equations (4a)–(4d) and equations (5a)–(5c) have full row rank, which readily implies that M(P, y) has full row rank. We ignore inequality constraints because they are transformed into equality constraints by the addition of unique slack variables. We begin by focusing on the equality constraints in (4a)–(4d) involving the x, w, and λ variables. Consider a fixed (i, j) ∈ B.
Since at most four of the λ^{ij} variables may be nonzero, we can rewrite these equality constraints as follows after a change of variables (here, A(i) denotes the active partition of x_i):

\[
\begin{pmatrix}
-1 & 0 & 0 & p^i_{A(i)-1} & p^i_{A(i)-1} & p^i_{A(i)} & p^i_{A(i)} \\
0 & -1 & 0 & p^j_{A(j)-1} & p^j_{A(j)} & p^j_{A(j)-1} & p^j_{A(j)} \\
0 & 0 & -1 & p^i_{A(i)-1}p^j_{A(j)-1} & p^i_{A(i)-1}p^j_{A(j)} & p^i_{A(i)}p^j_{A(j)-1} & p^i_{A(i)}p^j_{A(j)} \\
0 & 0 & 0 & 1 & 1 & 1 & 1
\end{pmatrix}
\begin{pmatrix} x_i \\ x_j \\ w_{ij} \\ \lambda^{ij}_1 \\ \lambda^{ij}_2 \\ \lambda^{ij}_3 \\ \lambda^{ij}_4 \end{pmatrix}
=
\begin{pmatrix} 0 \\ 0 \\ 0 \\ 1 \end{pmatrix}.
\]

We argue that the following sub-matrix is of full rank whenever P ∈ int(𝒫):

\[
\begin{pmatrix}
p^i_{A(i)-1} & p^i_{A(i)-1} & p^i_{A(i)} & p^i_{A(i)} \\
p^j_{A(j)-1} & p^j_{A(j)} & p^j_{A(j)-1} & p^j_{A(j)} \\
p^i_{A(i)-1}p^j_{A(j)-1} & p^i_{A(i)-1}p^j_{A(j)} & p^i_{A(i)}p^j_{A(j)-1} & p^i_{A(i)}p^j_{A(j)} \\
1 & 1 & 1 & 1
\end{pmatrix}.
\]

Subtracting the first column from the second column, the third from the fourth column, and finally the first from the third column yields the column vectors

\[
\bigl( p^i_{A(i)-1}, \; p^j_{A(j)-1}, \; p^i_{A(i)-1}p^j_{A(j)-1}, \; 1 \bigr), \quad
\bigl( 0, \; p^j_{A(j)} - p^j_{A(j)-1}, \; p^i_{A(i)-1}(p^j_{A(j)} - p^j_{A(j)-1}), \; 0 \bigr),
\]
\[
\bigl( p^i_{A(i)} - p^i_{A(i)-1}, \; 0, \; p^j_{A(j)-1}(p^i_{A(i)} - p^i_{A(i)-1}), \; 0 \bigr), \quad
\bigl( 0, \; p^j_{A(j)} - p^j_{A(j)-1}, \; p^i_{A(i)}(p^j_{A(j)} - p^j_{A(j)-1}), \; 0 \bigr).
\]

It is easy to see these vectors are linearly independent if 0 < p^i_{A(i)-1} < p^i_{A(i)} < 1 for all i. Next, we focus on the equality constraints in (5a)–(5c) involving the x, w, and λ variables for a fixed k ∈ Q. Since at most two of the λ^k variables may be nonzero, we can rewrite these equality constraints as follows after a change of variables:

\[
\begin{pmatrix} -1 & p^k_{A(k)-1} & p^k_{A(k)} \\ 0 & 1 & 1 \end{pmatrix}
\begin{pmatrix} x_k \\ \lambda^k_1 \\ \lambda^k_2 \end{pmatrix}
=
\begin{pmatrix} 0 \\ 1 \end{pmatrix}.
\]

The last two matrix columns are linearly independent if 0 < p^k_{A(k)-1} < p^k_{A(k)} < 1.
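As a quick numerical check of the key step in Lemma 6's proof, the Python sketch below (illustrative only) builds the 4 × 4 sub-matrix above for randomly drawn partitioning points satisfying 0 < p^i_{A(i)-1} < p^i_{A(i)} < 1 and 0 < p^j_{A(j)-1} < p^j_{A(j)} < 1 and verifies that it has full rank.

import numpy as np

rng = np.random.default_rng(0)
for _ in range(1000):
    # Draw 0 < p_i_lo < p_i_hi < 1 and 0 < p_j_lo < p_j_hi < 1.
    p_i_lo, p_i_hi = np.sort(rng.uniform(0.0, 1.0, size=2))
    p_j_lo, p_j_hi = np.sort(rng.uniform(0.0, 1.0, size=2))
    S = np.array([
        [p_i_lo,          p_i_lo,          p_i_hi,          p_i_hi],
        [p_j_lo,          p_j_hi,          p_j_lo,          p_j_hi],
        [p_i_lo * p_j_lo, p_i_lo * p_j_hi, p_i_hi * p_j_lo, p_i_hi * p_j_hi],
        [1.0,             1.0,             1.0,             1.0],
    ])
    assert np.linalg.matrix_rank(S) == 4
print("4x4 sub-matrix has full rank for 1000 random draws")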
Finally, we show that problem (7) has a unique y solution for almost every (a.e.) P ∈ 𝒫, which ensures that Theorem 3 or 4 is applicable for a.e. P ∈ 𝒫.

Theorem 7. Suppose every optimal solution of (1) has at least one active inequality involving nonconvex terms. Additionally, suppose the optimal value of the termwise McCormick relaxation (2) is strictly less than v∗. Then problem (7) has a unique y solution for a.e. P ∈ 𝒫 (with respect to the uniform measure on 𝒫).

Proof. Let P ∈ 𝒫. Suppose problem (7) has optimal y solutions ỹ, ŷ ∈ Y with ỹ ≠ ŷ. Then, there exist (non-singular) basis matrices M̃(P, ỹ) and M̂(P, ŷ) for the LPs (8) corresponding to ỹ and ŷ, respectively, such that

\[ v(P) = v(P, \tilde{y}) = \tilde{c}^{\mathsf{T}} [\tilde{M}(P, \tilde{y})]^{-1} (\bar{B}\tilde{y} + \bar{b}) = \hat{c}^{\mathsf{T}} [\hat{M}(P, \hat{y})]^{-1} (\bar{B}\hat{y} + \bar{b}) = v(P, \hat{y}) \tag{9} \]

for suitable vectors c̃ and ĉ, which only include the components of c̄ corresponding to the basic variables. Because at every optimal solution of (1) at least one inequality involving nonconvex terms is active and not all x variables are at their bounds (otherwise, the optimal value of (2) would equal v∗), we may assume without loss of generality that some of the entries of either M̃(P, ỹ) or M̂(P, ŷ) are functions of P. Equation (9) thus yields a polynomial equation in the partitioning points P. Therefore, the set of all P ∈ 𝒫 such that (9) holds has measure zero. Noting that |Y| < +∞ and that the number of possible bases is finite for each y ∈ Y concludes the proof.
4.2 Algorithmic enhancements

We design preprocessing and postprocessing steps that can be used to mitigate the computational burden of solving (SP) and enable our ML model to more effectively learn its solution. The outer-maximization in problem (SP) involves n × d partitioning variables. Since larger problem dimensions may increase both the per-iteration cost and the number of iterations taken by the bundle solver to converge, we propose preprocessing heuristics to fix a subset of the partitioning points P and to compute an initial guess P⁰ for the bundle method. After solving the max-min problem (SP) (line 17), we propose postprocessing steps to eliminate partitioning points in its solution P̄ that do not significantly affect the lower bound v(P̄). Algorithm 1 includes detailed pseudocode of our preprocessing (lines 1–16) and postprocessing (lines 18–27) steps.

Algorithm 1: Preprocessing and postprocessing steps

Preprocessing steps
1: Initialize partitions P⁰_i := [0, 1], ∀i ∈ [n]
2: Solve the McCormick relaxation (2) to compute a lower bounding solution x̄⁰
3: for k = 1, 2, . . . , d do
4:   for i = 1, 2, . . . , n do
5:     if x̄^{k−1}_i ≈ x̃_i for some x̃_i ∈ P^{k−1}_i then
6:       Set P^k_i = P^{k−1}_i
7:     else
8:       Insert x̄^{k−1}_i in P^{k−1}_i to obtain P^k_i
9:     end if
10:  end for
11:  Solve (PMR-OA) with partitions {P^k_i}_{i∈[n]} to determine solution x̄^k
12: end for
13: Let n_i := |P^d_i| − 2, ∀i ∈ [n]
14: Let [0, p^{i0}_{d−n_i+1}, p^{i0}_{d−n_i+2}, . . . , p^{i0}_d, 1] denote P^d_i and set p^{i0}_j := 0, ∀j ∈ [d − n_i]
15: Set the initial guess for (SP) to P⁰, where P⁰_{ij} := p^{i0}_j, ∀i ∈ [n], j ∈ [d]
16: Fix variables p^i_j, j ∈ [d − n_i], to 0 while solving (SP)
17: Solve max-min problem (SP) to obtain a solution P̄ ∈ 𝒫 with objective v̄ := v(P̄)

Postprocessing steps
18: for j = 1, 2, . . . , d do
19:  for i = 1, 2, . . . , n do
20:    Set P̂ = P̄ and replace the element p̂^i_j in P̂ with zero
21:    Solve (PMR-OA) with partitioning points P̂ to obtain bound v̂ := v(P̂)
22:    if v̂ ≥ v̄ − 10⁻⁶|v̄| then
23:      Update P̄ = P̂ and sort it such that P̄_{i,j} ≤ P̄_{i,j+1}, ∀i ∈ [n], j ∈ [d − 1]
24:    end if
25:  end for
26: end for
27: Return the postprocessed solution P̄
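For concreteness, a minimal Python sketch of the postprocessing loop (lines 18–27) is given below. It assumes a hypothetical oracle solve_pmr_oa(P) that returns the lower bound v(P) of the outer-approximated piecewise McCormick relaxation for a given matrix of partitioning points; the paper's actual implementation is in Julia within Alpine.jl. A tentative removal is kept only when the bound drops by less than a small relative tolerance.

import numpy as np

def postprocess(P_bar, solve_pmr_oa, rel_tol=1e-6):
    """Drop partitioning points whose removal barely changes the bound v(P).

    P_bar        : (n, d) array of partitioning points returned by the bundle solver.
    solve_pmr_oa : hypothetical oracle mapping a partition matrix to its bound v(P).
    """
    v_bar = solve_pmr_oa(P_bar)                  # line 17 of Algorithm 1 supplies this value
    for j in range(P_bar.shape[1]):              # lines 18-26
        for i in range(P_bar.shape[0]):
            P_hat = P_bar.copy()
            P_hat[i, j] = 0.0                    # tentatively remove point (i, j)
            v_hat = solve_pmr_oa(P_hat)
            if v_hat >= v_bar - rel_tol * abs(v_bar):
                P_bar = np.sort(P_hat, axis=1)   # keep the removal and re-sort each row
    return P_bar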
5 Numerical experiments

We study the impact of using strong partitioning to specify Alpine's variable partitions at the first iteration and investigate an off-the-shelf ML model for learning these partitions for homogeneous QCQPs. We begin by describing the setup for our computational experiments. In Section 5.1, we outline the procedure for generating families of random QCQP instances, including instances of the pooling problem. We detail our ML approximation of strong partitioning in Section 5.2, and compare the performance of strong partitioning and its ML approximation against Alpine's default partitioning strategy in Section 5.3.
Our strong partitioning code is written in Julia 1.6.3 and implemented within Alpine.jl¹ v0.4.11. We use JuMP.jl v1.1.1 and Gurobi 9.1.2 via Gurobi.jl v0.11.3 for solving LPs, MILPs, and convex MIQCQPs (with MIPGap = 10⁻⁶). To solve NLPs locally within Alpine², we use either Ipopt 3.14.4 via Ipopt.jl v1.0.3 (with max_iter = 10⁴) or Artelys Knitro 12.4.0 via KNITRO.jl v0.13.0 (with algorithm = 3).
We use the bundle solver MPBNGC 2.0 [48] via MPBNGCInterface.jl³ (with OPT_LMAX = 20, OPT_EPS = 10⁻⁹, and OPT_NITER = OPT_NFASG = 500) to solve the max-min problem (SP) to local optimality. We consider strong partitioning with either two or four partitioning points per partitioned variable in addition to the variable bounds, and use scikit-learn v0.23.2 [57] to design its ML approximation. To demonstrate the non-trivial nature of our nonconvex test instances, we also solve them to global optimality using BARON 22.11.3 via BARON.jl v0.8.0, and provide BARON with the option of using CPLEX 22.1.0 as an MILP solver.

¹ https://github.com/lanl-ansi/Alpine.jl
² We switch Alpine's local solver between Ipopt for the random bilinear and QCQP instances and Knitro for the random pooling instances because Ipopt is ineffective for the pooling instances.
³ https://github.com/milzj/MPBNGCInterface.jl
All of our experiments were run on nodes of the Darwin cluster at LANL with dual-socket Intel Broadwell 18-core processors (E5-2695 v4 CPUs, base clock rate of 2.1 GHz), EDR InfiniBand, and 125 GB of memory. Each instance was run exclusively on a single node, and different solution approaches were run in sequence to limit the impact of variability in machine performance. All Alpine and BARON runs were given a time limit of 2 hours with target relative and absolute optimality gaps of 10⁻⁴ and 10⁻⁹, respectively⁴. No time limit was specified for solving the max-min problem (SP). The rest of BARON's options, including range reduction options, were kept at their defaults. We deactivate bounds tightening techniques within Alpine because they are largely ineffective for our medium and large-scale instances (our approaches are easily adapted to the setting where bounds tightening is employed). We partition the domains of all variables participating in nonconvex terms within Alpine, and set the rest of Alpine's options to their defaults, including the partition scaling factor ∆ = 10.

⁴ Alpine's definition of relative gap differs slightly from BARON's definition; see Section 5.3.1.

5.1 Test Instances

We describe how we generate homogeneous families of random QCQPs, including instances of the pooling problem, based on the literature. Scripts for generating the different families of instances can be found at https://github.com/lanl-ansi/Alpine.jl/tree/master/examples/random_QCQPs.
5.1.1 Random bilinear programs

We consider parametric bilinear programs of the form [7]:

\[
v(\theta) := \min_{x \in [0,1]^n} \; x^{\mathsf{T}} Q^0(\theta) x + (r^0(\theta))^{\mathsf{T}} x
\quad \text{s.t.} \quad
x^{\mathsf{T}} Q^i(\theta) x + (r^i(\theta))^{\mathsf{T}} x \le b_i, \;\; \forall i \in [m_I],
\qquad (a^j)^{\mathsf{T}} x = d_j, \;\; \forall j \in [m_E],
\]

where θ ∈ [−1, 1]^{d_θ} are parameters, r^k(θ) ∈ R^n for k ∈ {0} ∪ [m_I], Q^k(θ) ∈ R^{n×n} for k ∈ {0} ∪ [m_I] are symmetric but not necessarily positive semi-definite, a^j ∈ R^n for j ∈ [m_E], b ∈ R^{m_I}, and d ∈ R^{m_E}. We generate 1000 instances for each of n ∈ {10, 20, 50} variables with |B| = min{5n, \binom{n}{2}} bilinear terms (we count x_i x_j and x_j x_i as the same bilinear term; all instances for a fixed dimension n have the same set of |B| bilinear terms), |Q| = 0 quadratic terms, m_I = n bilinear inequalities, and m_E = 0.2n linear equalities [7]. We let the dimension d_θ = 3 × (0.2 m_I + 1) (see below for why we make this choice). The problem data is generated as follows (cf. [7]). All entries of the vectors a^j and d are generated i.i.d. from the uniform distribution U(−1, 1), and all entries of the vector b are generated i.i.d. from U(0, 100).
The components of θ are generated i.i.d. from U(−1, 1). Each Q^k and r^k, k ∈ {0, 1, . . . , 0.2 m_I}, are of the form

\[
Q^k(\theta) = \bar{Q}^k + \sum_{l=3k+1}^{3k+3} \theta_l \, \tilde{Q}^{k,\, l-3k},
\qquad
r^k(\theta) = \bar{r}^k + \sum_{l=3k+1}^{3k+3} \theta_l \, \tilde{r}^{k,\, l-3k}.
\]

The nonzero entries of the “nominal matrices” Q̄^k and “nominal vectors” r̄^k are generated i.i.d. from U(−1, 1). For each tuple (i, j) ∈ B and indices k ∈ {0, 1, . . . , 0.2 m_I} and l ∈ {1, 2, 3}, we set Q̃^{k,l}_{ij} := γ^{k,l}_{ij} Q̄^k_{ij}, where the γ^{k,l}_{ij} are generated i.i.d. from U(0, 0.5).
Similarly, for each index i ∈ {1, . . . , n}, k ∈ {0, 1, . . . , 0.2 m_I}, and l ∈ {1, 2, 3}, we set r̃^{k,l}_i := δ^{k,l}_i r̄^k_i, where the δ^{k,l}_i are generated i.i.d. from U(0, 0.5). Since each Q̃^{k,l} and r̃^{k,l} is a different perturbation of Q̄^k and r̄^k, the expansions of Q^k and r^k may be motivated using principal components analysis. The nonzero entries of the remaining matrices Q^k and vectors r^k, k ∈ {0.2 m_I + 1, . . . , m_I}, are the same across all 1000 instances and generated i.i.d. from U(−1, 1).
Finally, the constraint coefficients are re-scaled such that the vectors b = d = 1. Note that for a fixed dimension n, each instance is uniquely specified by the parameters θ.
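The following Python/NumPy sketch illustrates how the θ-perturbed data Q^k(θ) and r^k(θ) are assembled for k ≤ 0.2 m_I. It is a schematic of the recipe above rather than the released generation scripts (which are in Julia at the URL given earlier); in particular, it ignores the fixed bilinear sparsity pattern B and the fact that the nominal data are shared across all 1000 instances while only θ varies.

import numpy as np

rng = np.random.default_rng(0)
n, m_I = 10, 10                          # e.g., the n = 10 family with m_I = n inequalities
n_pert = int(0.2 * m_I)                  # indices k = 0, 1, ..., n_pert are theta-perturbed
d_theta = 3 * (n_pert + 1)
theta = rng.uniform(-1.0, 1.0, d_theta)  # this vector uniquely identifies the instance

def make_Q_r(k):
    """Assemble Q^k(theta) and r^k(theta) for a perturbed index k in {0, ..., n_pert}."""
    Q_bar = rng.uniform(-1.0, 1.0, (n, n))
    Q_bar = (Q_bar + Q_bar.T) / 2                # symmetric nominal matrix (sparsity of B omitted)
    r_bar = rng.uniform(-1.0, 1.0, n)
    Q, r = Q_bar.copy(), r_bar.copy()
    for l in range(3 * k + 1, 3 * k + 4):        # three perturbation directions per k
        gamma = rng.uniform(0.0, 0.5, (n, n))    # entrywise scaling of the nominal data
        delta = rng.uniform(0.0, 0.5, n)
        Q += theta[l - 1] * gamma * Q_bar        # Q-tilde^{k, l-3k} = gamma (entrywise) * Q_bar
        r += theta[l - 1] * delta * r_bar
    return Q, r

Q0, r0 = make_Q_r(0)                             # objective data for this instance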
5.1.2 Random QCQPs with bilinear and univariate quadratic terms

We also generate 1000 random QCQPs with |B| = min{5n, \binom{n}{2}} bilinear terms and |Q| = ⌊0.25n⌋ univariate quadratic terms for each of n ∈ {10, 20, 50} variables (all instances for a fixed n have the same set of bilinear and univariate quadratic terms). The coefficients of quadratic terms in the objective and constraints are generated similarly to the coefficients of bilinear terms in Section 5.1.1. The rest of the model parameters and problem data are also generated similarly as in Section 5.1.1.

5.1.3 The pooling problem

The pooling problem is a classical example of a bilinear program introduced by Haverly [30]. It has several important applications in process systems engineering, including petroleum refining [33, 66], natural gas production [33, 42], and water treatment network design [10, 50, 59]. Its goal is to blend inputs of differing qualities at intermediate pools to produce outputs that meet quality specifications while satisfying capacity constraints at inputs, pools, and outputs. Solving the pooling problem is NP-hard [3].

We consider instances of the pooling problem with 45 inputs, 15 pools, 30 outputs, and a single quality. Each instance has 116 input-output arcs, 71 input-pool arcs, and 53 pool-output arcs, yielding 572 variables and 621 constraints, including 360 linear constraints and 261 bilinear equations (with 124 variables involved in bilinear terms). We use the pq-formulation of the pooling problem outlined in Section 2 of [47]. Note that unlike the random bilinear instances in Section 5.1.1, where all of the original "x variables" participate in bilinear terms, only 124 out of the 311 original variables in the pooling model participate in bilinear terms.

We first generate a nominal instance using the "random Haverly" instance generation approach⁵ in [47], which puts together 15 perturbed copies of one of Haverly's pooling instances [30] and adds 150 edges to it. We modify the target output quality concentrations generated by [47] to construct harder instances. For each output j, we compute the minimum c^min_j and maximum c^max_j input concentrations of the quality over the subset of inputs from which there exists a path to output j. We then specify the lower and upper bounds on the quality concentration at output j to be c^min_j + α_j(c^max_j − c^min_j) and c^min_j + β_j(c^max_j − c^min_j), respectively, where α_j ∼ U(0.2, 0.4) and β_j ∼ U(0.6, 0.8) are generated independently. We also rescale the capacities of the inputs, pools, and outputs and the costs of the arcs for better numerical performance. Note that while all variables in the formulation are non-negative, upper bounds on the variables are not necessarily equal to one after rescaling. After constructing a nominal instance using the above procedure, we use it to generate 1000 random pooling instances by randomly perturbing each input's quality concentration (these are the parameters θ for this problem family) by up to 20%, uniformly and independently.

⁵ https://github.com/poolinginstances/poolinginstances
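A small Python sketch of this output-bound construction and the instance perturbation is given below. The reachability pattern and nominal input concentrations are placeholders rather than the paper's actual network data, and the ±20% perturbation is one reading of "by up to 20%".

import numpy as np

rng = np.random.default_rng(0)
n_inputs, n_outputs = 45, 30
c_in = rng.uniform(0.5, 3.0, n_inputs)              # placeholder nominal input quality concentrations
reach = rng.random((n_inputs, n_outputs)) < 0.3     # placeholder input-to-output reachability

# Target quality bounds at each output j, following the recipe above.
lb, ub = np.empty(n_outputs), np.empty(n_outputs)
for j in range(n_outputs):
    c_reach = c_in[reach[:, j]]
    c_min, c_max = c_reach.min(), c_reach.max()
    alpha, beta = rng.uniform(0.2, 0.4), rng.uniform(0.6, 0.8)
    lb[j] = c_min + alpha * (c_max - c_min)
    ub[j] = c_min + beta * (c_max - c_min)

# 1000 random instances: perturb each input's quality concentration by up to 20% (sign assumed +/-).
thetas = rng.uniform(-0.2, 0.2, (1000, n_inputs))
instances = c_in * (1.0 + thetas)                   # one row of perturbed concentrations per instance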
5.2 Machine learning approximation of strong partitioning

We detail our off-the-shelf ML approximation of strong partitioning in this section. Although our ultimate goal is to optimize the ML model so that its predictions yield good performance when they are used to inform Alpine's partitions at the first iteration, we instead choose our ML model solely based on its accuracy in predicting the strong partitioning points. We do so because tuning the hyperparameters of the ML model directly for good performance within Alpine incurs a huge computational expense, due to the need to re-evaluate the performance of the ML predictions within Alpine for each choice of the hyperparameters. While the choice of the ML model can have a significant impact on the performance of its predictions within Alpine, we leave the design of more sophisticated ML architectures for future work.
Problem family      Scaled MAE < 0.01   < 0.02   < 0.05   < 0.1   < 0.2
Bilinear n = 10               60          75       80       95     100
Bilinear n = 20               15          22.5     60       87.5    97.5
Bilinear n = 50               31          39       70       94     100
QCQP n = 10                   65          80       95      100     100
QCQP n = 20                   35          37.5     77.5     92.5   100
QCQP n = 50                   56          66       85       99     100
Pooling                       65.7        70.9     78.6     89.5    97.2

Table 1: Statistics of scaled MAEs of the out-of-sample predictions of the ML model. Each entry is the percentage of partitioning points whose scaled MAE falls below the column threshold.

We use scikit-learn's AdaBoost regressor⁶ [26], which implements the "AdaBoost.R2 algorithm" [23], to learn a mapping from each QCQP instance to the strong partitioning points. Our base estimator is a scikit-learn regression tree⁷ [15] with maximum depth equal to 25, and we set the maximum number of weak learners for the boosting algorithm to 1000. The rest of scikit-learn's AdaBoostRegressor options are set to their defaults.

⁶ https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostRegressor.html
⁷ https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html
We use 10-fold cross-validation to generate out-of-sample ML predictions for all 1000 QCQP instances in each problem family. Specifically, we randomly split the 1000 instances in each family into 10 folds, use 9 out of the 10 folds for training the ML model, predict the strong partitioning points for the omitted fold, and loop through the different choices of the omitted fold to generate predictions for all 1000 instances. We emphasize that we fit our ML model for prediction accuracy and do not perform much hyperparameter tuning, since our ultimate goal is good performance of the ML predictions when used within Alpine.

ML model inputs and outputs. The choice of features for the ML model can greatly impact its performance. We use the following problem features as inputs to the ML model: (i) the parameters θ, which uniquely parametrize each nonconvex QCQP instance; (ii) the best found feasible solution during Alpine's presolve step (which involves a single local solve); and (iii) the McCormick lower bounding solution (obtained by solving a single convex program). Although it is theoretically sufficient to use only the parameters θ as features because they uniquely identify each QCQP instance, we also use features (ii) and (iii) since they are relatively cheap to compute and intuitively can help inform the partitioning strategy. These additional features are also complicated transformations of the instance parameters θ that may otherwise be challenging to uncover. The outputs of our ML model are the d partitioning points (excluding variable bounds) for each of the n partitioned variables, resulting in an output dimension of d × n.
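A minimal Python sketch of this pipeline is shown below, assuming the features and targets have already been assembled into arrays X (the parameters θ, presolve solution, and McCormick solution concatenated per instance) and Y (the d × n strong partitioning points per instance); random placeholders stand in for both. The hyperparameters match the ones stated above, but wrapping the single-output AdaBoost regressor in MultiOutputRegressor to handle the d × n outputs is our assumption, since the multi-output handling is not spelled out here.

import numpy as np
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import cross_val_predict
from sklearn.multioutput import MultiOutputRegressor
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(0)
n_instances, n_features, n_outputs = 1000, 40, 20   # placeholder sizes (e.g., d x n = 2 x 10)
X = rng.normal(size=(n_instances, n_features))      # stand-in for [theta, presolve sol., McCormick sol.]
Y = rng.uniform(size=(n_instances, n_outputs))      # stand-in for the strong partitioning points

base = DecisionTreeRegressor(max_depth=25)          # regression-tree weak learner
model = MultiOutputRegressor(                       # one boosted ensemble per output (assumption)
    AdaBoostRegressor(base, n_estimators=1000)
)

# 10-fold cross-validation: every instance's prediction comes from a model trained on the
# other 9 folds, which yields out-of-sample predictions for all 1000 instances.
Y_pred = cross_val_predict(model, X, Y, cv=10)
mae = np.mean(np.abs(Y_pred - Y), axis=0)           # per-point MAE; divide by variable upper bounds to scale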
In contrast with much of the literature on learning for MILPs, we train separate ML models for each family of 1000 instances since both the feature and output dimensions of our ML models depend on the problem dimensions. While we plan to design more advanced ML architectures that can accommodate variable feature and output dimensions as part of future work, we do not consider the need to train a different ML model for each problem family to be a major limitation. This is because decision-makers often care about solving instances of the same problem family with only a few varying parameters, which means they only need to train a single ML model with fixed feature and output dimensions for their application.

We now summarize the out-of-sample prediction errors of our trained ML models when they are used to predict two strong partitioning points per partitioned variable (excluding variable bounds). Table 1 provides statistics of the scaled mean absolute errors (MAEs) of the out-of-sample predictions of the 2n partitioning points (248 points for the pooling problem) produced by the ML model for each problem family. The MAEs of the predicted partitioning points are averaged over the 1000 instances in each family and scaled by the upper bounds of the corresponding x variables; these upper bounds are simply equal to one for the random bilinear and QCQP instances, but are greater than one for some of the partitioned variables in the pooling instances.

⁶ https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostRegressor.html
⁷ https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html
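As a rough illustration of this error metric, a scaled MAE could be computed along the following lines; the array names, shapes, and the exact order of averaging and scaling are our reading of the description above rather than the authors' code.

```python
import numpy as np

def scaled_mae(y_true, y_pred, x_upper_bounds):
    """Per-point MAE, averaged over instances and scaled by variable upper bounds.

    y_true, y_pred: (num_instances, d * n) arrays of true/predicted partitioning points.
    x_upper_bounds: length d * n array repeating each partitioned variable's upper bound d times.
    """
    mae = np.mean(np.abs(y_true - y_pred), axis=0)  # average absolute error over instances
    return mae / np.asarray(x_upper_bounds, dtype=float)
```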
Problem Family      BARON Solution Time (seconds)                # TLE   TLE Gap (GM)
                    Shifted GM   Median    Min       Max
Bilinear n = 10        0.2         0.2      0.1        0.4          0
Bilinear n = 20        3.5         3.6      1.4        7.0          0
Bilinear n = 50      257.1       260.6     54.3     4637.4          0
QCQP n = 10            0.3         0.3      0.1        0.6          0
QCQP n = 20            4.8         4.6      1.6       10.8          0
QCQP n = 50          268.2       246.2     14.7     6897.9         19     2.3 × 10⁻²
Pooling              441.4       422.1     16.6     7114.5        432     2.7 × 10⁻²
Table 2: Statistics of BARON solution times, including the shifted geometric mean, median, minimum, and maximum times over the subset of 1000 instances for which BARON does not hit the time limit. The last two columns denote the number of instances for which BARON hits the time limit and the corresponding geometric mean of residual optimality gaps at termination, respectively.

Solution Method     Solution Time (seconds)                     # TLE
                    Shifted GM   Median    Min       Max
Alpine (default)       30.7       14.1      0.4     4020.9         2
Alpine+SP2              9.4        1.7      0.2     3864.9         1
Alpine+SP4              5.8        1.4      0.2     3871.3         0

Table 3: (Benchmark QCQPs) Statistics of solution times.
Columns correspond to the shifted geometric mean, median, minimum, and maximum times over the subset of 140 instances that do not hit the time limit. The last column denotes the number of instances for which each method hits the time limit.

Roughly 90% or higher of the partitioning points predicted using ML have a scaled MAE of less than 10% for each problem family, which indicates that the same underlying ML model is able to generate reasonable predictions of the strong partitioning points across these different problem families.

5.3 Results and discussion

We begin by benchmarking the hardness of our instances using BARON. We then compare the performance of default Alpine with the use of strong partitioning and its ML approximation (described in Section 5.2) within Alpine through a few metrics. All reported times are in seconds and do not include the time for solving the max-min problem (SP) or training the ML model.

5.3.1 Benchmarking using BARON

To illustrate the non-trivial nature of our instances, we present statistics of their run times using BARON in Table 2. BARON solves the 10 variable and 20 variable random bilinear and QCQP instances within seconds, but takes over 4 minutes on average to solve the 50 variable instances and times out on 19/1000 of the 50 variable QCQP instances. BARON finds the random pooling instances to be significantly harder, timing out on 432/1000 instances⁸ and taking roughly 7 minutes to solve the remaining 568/1000 instances on average.

⁸ BARON finds global solutions but is unable to prove global optimality within the time limit.
As suggested in the literature, we use the shifted geometric mean as one of the metrics to compare the solution times of different algorithms on a family of test instances. The shifted geometric mean (shifted GM) of a positive vector t ∈ ℝ^N_+ is defined as⁹:

$$\text{Shifted GM}(t) = \exp\left(\frac{1}{N}\sum_{i=1}^{N}\ln\big(\max(1,\ t_i + \text{shift})\big)\right) - \text{shift},$$

where we set shift = 10 when comparing solution times in seconds. The last column in Table 2 notes the GM of the relative optimality gap at termination for instances where BARON hits the time limit. Following the definition of relative optimality gap in Alpine, this residual optimality gap is defined as (UB − LB)/(10⁻⁶ + |UB|), where UB and LB are the upper and lower bounds returned by BARON at termination. We emphasize that our goal is not to compare the different versions of Alpine with BARON but rather to illustrate that our instances and accelerations of Alpine are non-trivial.
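A direct transcription of this definition into code, with shift = 10 for times measured in seconds (the function name and the example values are illustrative only):

```python
import numpy as np

def shifted_geometric_mean(times, shift=10.0):
    """Shifted geometric mean of a vector of positive solution times."""
    t = np.asarray(times, dtype=float)
    return float(np.exp(np.mean(np.log(np.maximum(1.0, t + shift)))) - shift)

# Example with three hypothetical solution times (in seconds).
print(shifted_geometric_mean([0.4, 14.1, 4020.9]))
```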
5.3.2 Evaluating strong partitioning on benchmark QCQPs

We compare Alpine's default partitioning strategy with the use of two/four strong partitioning points excluding the bounds (Alpine+SP2 and Alpine+SP4, respectively) on a subset of BARON's QCQP test library¹⁰ [7]. Specifically, we only consider the 140 QCQP instances from Bao et al. [7] with 20 variables in order to keep the time for solving the max-min problem manageable. Table 3 presents statistics of the run times of default Alpine, Alpine+SP2, and Alpine+SP4 on these 140 instances. Alpine+SP2 and Alpine+SP4 are able to reduce the shifted GM of Alpine's solution time by factors¹¹ of 3.3 and 5.3, respectively, which indicates that strong partitioning has the potential to result in significant speedups on broad families of QCQPs. Table 8 reports statistics of the solution times for the max-min problem over these 140 instances.

5.3.3 Evaluating the performance of strong partitioning and its ML approximation

We compare Alpine's default partitioning strategy with the use of two strong partitioning points (excluding variable bounds) per partitioned variable (Alpine+SP2) and its ML approximation (Alpine+ML2) in Alpine's first iteration. For the cases with n = 20, we also compare the above approaches with the use of four strong partitioning points (excluding the bounds) per partitioned variable (Alpine+SP4) and its ML approximation (Alpine+ML4) at Alpine's first iteration. We compare these methods for each family of instances using two metrics: i. statistics of solution times, and ii. statistics of the effective optimality gap after Alpine's first iteration. We define the effective relative optimality gap as

$$\text{Effective Optimality Gap} = \max\left(10^{-4},\ \frac{v^{*} - v^{\mathrm{LBD}}}{10^{-6} + |v^{*}|}\right), \tag{10}$$

where v* is the optimal objective value, v^LBD is Alpine's lower bound after one iteration (using any one of the different approaches for specifying partitions), and 10⁻⁴ is the target optimality gap. By measuring the gap of v^LBD relative to the optimal objective value v* instead of the best found feasible solution, we do not let the performance of the local solver impact our evaluation of the different partitioning methods. Thresholding the optimality gap at 10⁻⁴ also lends equal importance to all optimality gaps less than the target since all such gaps are sufficient for Alpine to converge.
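Written out as code, equation (10) is simply the following (the names are ours; v_star and v_lbd correspond to v* and v^LBD above):

```python
def effective_optimality_gap(v_star, v_lbd, target_gap=1e-4):
    """Effective relative optimality gap (10) after one Alpine iteration."""
    return max(target_gap, (v_star - v_lbd) / (1e-6 + abs(v_star)))
```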
Table 4 presents statistics of run times of default Alpine, Alpine with the different versions of strong partitioning at the first iteration, and Alpine with the different ML approximations of strong partitioning at the first iteration for the different problem families. Table 5 records the speedup/slowdown of the different versions of Alpine+SP and Alpine+ML over default Alpine. Table 6 presents statistics of the effective optimality gaps (10) of the different approaches after one iteration, whereas Table 7 notes the GM of the residual effective optimality gaps on instances for which the different approaches hit the time limit. Table 8 reports statistics of the solution times for the max-min problem for the different problem families.

⁹ http://plato.asu.edu/ftp/shgeom.html
¹⁰ These 140 "qcqp2" instances are from https://minlp.com/nlp-and-minlp-test-problems
¹¹ These factors correspond to 69.7% and 81.1% average reductions in Alpine's solution times.
Problem Family    Solution Method     Shifted GM   Median    Min      Max     # TLE
Bilinear n = 10   Alpine (default)       0.51       0.47     0.14      2.41      0
                  Alpine+SP2             0.11       0.10     0.06      0.28      0
                  Alpine+ML2             0.15       0.10     0.06      1.64      0
Bilinear n = 20   Alpine (default)      21.4       21.9      5.1     161.5       0
                  Alpine+SP2             4.2        2.0      0.8     132.6       0
                  Alpine+ML2            10.0        7.8      1.1     116.0       0
                  Alpine+SP4             2.4        1.9      0.8      94.2       0
                  Alpine+ML4             9.3        7.2      1.0     117.4       0
Bilinear n = 50   Alpine (default)     405.9      336.2     48.0    7135.9      24
                  Alpine+SP2            52.8       34.9      4.2    5705.1       4
                  Alpine+ML2           101.6       83.6      6.6    7071.7       5
QCQP n = 10       Alpine (default)       0.85       0.81     0.62      2.29      0
                  Alpine+SP2             0.10       0.09     0.07      0.27      0
                  Alpine+ML2             0.27       0.12     0.07      2.89      0
QCQP n = 20       Alpine (default)      40.1       35.6      4.6     241.1       0
                  Alpine+SP2             7.7        1.7      0.8     135.4       0
                  Alpine+ML2            13.0        9.5      1.0     180.1       0
                  Alpine+SP4             2.4        1.5      0.7     125.7       0
                  Alpine+ML4             9.4        6.4      0.9     101.2       0
QCQP n = 50       Alpine (default)     391.5      289.1     36.6    7198.2       0
                  Alpine+SP2            63.3       51.9      4.2    6055.2       0
                  Alpine+ML2           100.5      118.2      5.3    6514.2       0
Pooling           Alpine (default)     242.8      212.5     25.9    7091.9       7
                  Alpine+SP2            66.7       49.7      1.6    6127.1       5
                  Alpine+ML2           117.1      101.9     11.4    6097.0       1

Table 4: (Solution Times) Statistics of solution times. Columns correspond to the shifted geometric mean, median, minimum, and maximum times over the subset of 1000 instances that did not hit the time limit.
The last column denotes the number of instances for which each method hits the time limit.

Figures 2, 3, and 4 plot solution profiles and histograms of the factor improvements of the effective optimality gaps for the bilinear, QCQP, and pooling families. We do not plot performance profiles due to their known issues (see http://plato.asu.edu/bench.html).

Bilinear Instances. Table 4 implies Alpine+SP2 is able to reduce the shifted GM of default Alpine's solution time by factors of 4.5, 5.1, and 7.7, respectively, for n = 10, n = 20, and n = 50 over 1000 instances. Alpine+ML2 is able to generate a moderate approximation of Alpine+SP2 overall, reducing the shifted GM of default Alpine's solution time by factors of 3.5, 2.1, and 4, respectively, for n = 10, n = 20, and n = 50 over the same 1000 instances. For the n = 20 instances, Alpine+SP4 and Alpine+ML4 reduce the shifted GM of default Alpine's solution time by factors of 9 and 2.3, respectively. Table 5 implies Alpine+SP2 results in at least 5× speedup over default Alpine on 41.3% of the n = 10 instances, and results in at least 10× speedup on 39.9% and 46.1% of the n = 20 and n = 50 instances, respectively.
On the other hand, Alpine+ML2 yields at least 5× speedup over default Alpine on 40.1%, 22.2%, and 45.2% of the n = 10, n = 20, and n = 50 instances. Alpine+SP4 results in at least 10× speedup over default Alpine on 51.7% of the n = 20 instances. Finally, Alpine+SP2 results in a maximum speedup of 15×, 49×, and 685× for the n = 10, n = 20, and n = 50 instances, respectively.
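These speedup figures are per-instance ratios of default Alpine's solution time to the accelerated variant's time. A small sketch of how the "at least k× speedup" percentages and the maximum speedup could be tabulated from two vectors of solution times follows; the function and array names are illustrative, not taken from the paper.

```python
import numpy as np

def speedup_summary(default_times, method_times, thresholds=(5.0, 10.0)):
    """Percentage of instances with at least k-fold speedup over default Alpine, plus the max speedup."""
    speedups = np.asarray(default_times, dtype=float) / np.asarray(method_times, dtype=float)
    pct_at_least = {k: 100.0 * float(np.mean(speedups >= k)) for k in thresholds}
    return pct_at_least, float(speedups.max())
```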
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='50 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='200 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='Time T (seconds) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='n = 20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='Default ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='SP2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='ML2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='SP4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='ML4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='20 50 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='200 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='500 2000 7200 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='Time T (seconds) ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='n = 50 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='Default ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='SP2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='ML2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='50 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='Gap reduction factor (1st iteration) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='30 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='50 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='% of instances ' metadata={'source': 
[Figure 2 histogram data omitted: panels for n = 10, n = 20, and n = 50; x-axis "Gap reduction factor (1st iteration)", y-axis "% of instances"; series Default/SP2 and Default/ML2.]

Figure 2: (Bilinear Instances) Top row: solution profiles indicating the percentage of instances solved by the different methods within time T seconds (higher is better). Bottom row: histogram plots of the ratios of the effective optimality gaps (10) of default Alpine with Alpine+SP2 and with Alpine+ML2 after one iteration (larger gap reduction factors are better).
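The solution profiles in Figures 2-4 are cumulative "percentage of instances solved within time T" curves. The following is a minimal illustrative sketch of how one such profile could be tabulated from per-instance solve times; the array names, the grid, and the 7200-second time limit are assumptions read off the plot axes, not the authors' code.

```python
import numpy as np

def solution_profile(solve_times, time_grid, time_limit=7200.0):
    """Percentage of instances solved within each time T; timed-out runs never count."""
    t = np.asarray(solve_times, dtype=float)
    solved = t < time_limit
    return [100.0 * float(np.mean(solved & (t <= T))) for T in time_grid]

# Hypothetical usage with a grid like the plots' x-axes:
# grid = [0.1, 0.2, 0.5, 1, 2, 5, 20, 50, 200, 500, 2000, 7200]
# profile_sp2 = solution_profile(times_sp2, grid)
```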
[Figure 3 plot data omitted: top row, solution profiles for n = 10, n = 20, and n = 50 (x-axis "Time T (seconds)", y-axis "% instances solved within time T"; series Default, SP2, ML2, plus SP4 and ML4 for n = 20); bottom row, gap-reduction-factor histograms (x-axis "Gap reduction factor (1st iteration)", y-axis "% of instances"; series Default/SP2 and Default/ML2).]

Figure 3: (QCQP Instances) Top row: solution profiles indicating the percentage of instances solved by the different methods within time T seconds (higher is better). Bottom row: histogram plots of the ratios of the effective optimality gaps (10) of default Alpine with Alpine+SP2 and with Alpine+ML2 after one iteration (larger gap reduction factors are better).

[Figure 4 plot data omitted: left, solution profile (x-axis "Time T (seconds)", y-axis "% instances solved within time T"; series Default, SP2, ML2); right, gap-reduction-factor histogram (x-axis "Gap reduction factor (1st iteration)", y-axis "% of instances"; series Default/SP2 and Default/ML2).]

Figure 4: (Pooling Instances) Left plot: solution profile indicating the percentage of instances solved by the different methods within time T seconds (higher is better). Right plot: histogram plots of the ratios of the effective optimality gaps (10) of default Alpine with Alpine+SP2 and with Alpine+ML2 after one iteration (larger gap reduction factors are better).

Speedup/Slowdown Factor (% of instances per bin):

Problem Family  | Solution Method    | <0.5 | 0.5-1 | 1-2  | 2-5  | 5-10 | 10-20 | 20-50 | >50
Bilinear n = 10 | % Alpine+SP2 inst. | –    | –     | 1.1  | 57.6 | 40.1 | 1.2   | 0     | 0
Bilinear n = 10 | % Alpine+ML2 inst. | 0.2  | 2.1   | 7.7  | 49.9 | 40.0 | 0.1   | 0     | 0
Bilinear n = 20 | % Alpine+SP2 inst. | 0.2  | 3.3   | 7.2  | 18.2 | 31.2 | 29.9  | 10.0  | 0.0
Bilinear n = 20 | % Alpine+ML2 inst. | 3.3  | 9.8   | 25.5 | 39.2 | 15.3 | 6.0   | 0.9   | 0.0
Bilinear n = 20 | % Alpine+SP4 inst. | 0.2  | 0.7   | 1.3  | 13.4 | 32.7 | 37.1  | 14.5  | 0.1
Bilinear n = 20 | % Alpine+ML4 inst. | 2.8  | 10.5  | 23.3 | 41.4 | 15.2 | 5.9   | 0.9   | 0.0
Bilinear n = 50 | % Alpine+SP2 inst. | 0.4  | 1.3   | 7.2  | 18.7 | 26.3 | 24.3  | 14.9  | 6.9
Bilinear n = 50 | % Alpine+ML2 inst. | 0.7  | 4.7   | 16.9 | 32.5 | 25.3 | 13.7  | 5.4   | 0.8
QCQP n = 10     | % Alpine+SP2 inst. | –    | –     | 0.1  | 3.3  | 76.1 | 20.4  | 0.1   | 0
QCQP n = 10     | % Alpine+ML2 inst. | 1.0  | 3.9   | 20.9 | 8.5  | 53.4 | 12.3  | 0     | 0
QCQP n = 20     | % Alpine+SP2 inst. | 0.1  | 3.2   | 12.2 | 18.4 | 11.5 | 19.4  | 32.6  | 2.6
QCQP n = 20     | % Alpine+ML2 inst. | 0.5  | 5.1   | 19.0 | 40.7 | 23.1 | 9.6   | 1.9   | 0.1
QCQP n = 20     | % Alpine+SP4 inst. | 0    | 0.2   | 1.3  | 3.8  | 5.5  | 28.2  | 53.7  | 7.3
QCQP n = 20     | % Alpine+ML4 inst. | 0    | 2.9   | 11.6 | 33.3 | 27.0 | 17.2  | 7.6   | 0.4
QCQP n = 50     | % Alpine+SP2 inst. | 0.9  | 1.3   | 10.7 | 22.0 | 23.0 | 32.5  | 7.2   | 2.4
QCQP n = 50     | % Alpine+ML2 inst. | 1.4  | 4.0   | 19.5 | 32.4 | 22.7 | 16.6  | 3.4   | 0
Pooling         | % Alpine+SP2 inst. | 2.2  | 6.4   | 19.7 | 26.0 | 21.8 | 16.8  | 6.7   | 0.4
Pooling         | % Alpine+ML2 inst. | 2.1  | 11.5  | 34.5 | 40.4 | 9.8  | 1.4   | 0.3   | 0

Table 5: (Speedup/Slowdown) Statistics of the speedup/slowdown of the different versions of Alpine with SP and its ML approximation (relative to default Alpine).

n = 10, n = 20, and n = 50 instances, whereas Alpine+ML2 results in a maximum speedup of 13×, 38×, and 197× for the same sets of instances. Table 6 implies Alpine+SP2 reduces the GM of default Alpine’s effective optimality gap (10) after the first iteration by factors of 5.5, 2200, and 80, respectively, for n = 10, n = 20, and n = 50. Alpine+ML2 reduces the GM of default Alpine’s effective gap after the first iteration by factors of 4.6, 180, and 15, respectively, for the n = 10, n = 20, and n = 50 instances.
Interestingly, Alpine+SP2 is able to close the effective gap in the first iteration for 100%, 82.3%, and 46% of the n = 10, n = 20, and n = 50 instances, whereas default Alpine is able to close the gap in the first iteration for at most 0.1% of the instances for these different problem families, which demonstrates the effectiveness of the strong partitioning strategy. Finally, Table 7 shows that Alpine+SP2 and Alpine+ML2 terminate with smaller average optimality gaps on the n = 50 instances where they time out compared to Alpine.

QCQP Instances. Table 4 implies Alpine+SP2 is able to reduce the shifted GM of default Alpine’s solution time by factors of 8.4, 5.2, and 6.2, respectively, for n = 10, n = 20, and n = 50. Alpine+ML2 is able to generate a moderate approximation of Alpine+SP2, reducing the shifted GM of default Alpine’s solution time by factors of 3.1, 3.1, and 3.9, respectively, for n = 10, n = 20, and n = 50 over the same 1000 instances. For the n = 20 instances, Alpine+SP4 and Alpine+ML4 reduce the shifted GM of default Alpine’s solution time by factors of 16.4 and 4.3, respectively. Table 5 implies Alpine+SP2 results in at least 10× speedup over default Alpine on 20.5%, 54.6%, and 42.1% of the n = 10, n = 20, and n = 50 instances, respectively. On the other hand, Alpine+ML2 yields at least 5× speedup over default Alpine on 65.7%, 34.7%, and 42.7% of the n = 10, n = 20, and n = 50 instances. Alpine+SP4 results in at least 20× speedup over default Alpine on 61% of the n = 20 instances. Finally, Alpine+SP2 results in a maximum speedup of 22×, 87×, and 98× for the n = 10, n = 20, and n = 50 instances, whereas Alpine+ML2 results in a maximum speedup of 19×, 56×, and 32× for the same sets of instances.
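The speedup statistics quoted above and binned in Table 5, as well as the shifted geometric means referenced from Table 4, are simple per-instance summaries. The sketch below shows one way they could be computed; the array names and the shift of 1 second are assumptions for illustration, not taken from the paper.

```python
import numpy as np

def speedup_factors(default_times, method_times):
    """Per-instance speedup (>1) or slowdown (<1) relative to default Alpine."""
    return np.asarray(default_times, dtype=float) / np.asarray(method_times, dtype=float)

def speedup_bins(factors, edges=(0.5, 1, 2, 5, 10, 20, 50)):
    """Percentage of instances in the Table 5-style bins (<0.5, 0.5-1, ..., >50)."""
    bins = np.concatenate(([0.0], edges, [np.inf]))
    counts, _ = np.histogram(np.asarray(factors, dtype=float), bins=bins)
    return 100.0 * counts / counts.sum()

def shifted_geometric_mean(times, shift=1.0):
    """Shifted geometric mean of solution times: exp(mean(log(t + s))) - s."""
    t = np.asarray(times, dtype=float)
    return float(np.exp(np.mean(np.log(t + shift))) - shift)

# Hypothetical usage for one problem family (1000 instances):
# print(speedup_bins(speedup_factors(times_default, times_sp2)))
# print(shifted_geometric_mean(times_default) / shifted_geometric_mean(times_sp2))
```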
Problem Family  | Solution Method  | GM          | Median      | Min         | Max         | % Instances Gap Closed
Bilinear n = 10 | Alpine (default) | 5.5 × 10^-4 | 4.5 × 10^-4 | 10^-4       | 3.4 × 10^-2 | 0.1
Bilinear n = 10 | Alpine+SP2       | 10^-4       | 10^-4       | 10^-4       | 10^-4       | 100
Bilinear n = 10 | Alpine+ML2       | 1.2 × 10^-4 | 10^-4       | 10^-4       | 4.4 × 10^-2 | 88.3
Bilinear n = 20 | Alpine (default) | 2.9 × 10^-1 | 3.3 × 10^-1 | 7.3 × 10^-2 | 4.8 × 10^-1 | 0
Bilinear n = 20 | Alpine+SP2       | 1.3 × 10^-4 | 10^-4       | 10^-4       | 6 × 10^-3   | 82.3
Bilinear n = 20 | Alpine+ML2       | 1.6 × 10^-3 | 1.9 × 10^-3 | 10^-4       | 1.4 × 10^-1 | 18.9
Bilinear n = 20 | Alpine+SP4       | 1.0 × 10^-4 | 10^-4       | 10^-4       | 4.7 × 10^-4 | 96.0
Bilinear n = 20 | Alpine+ML4       | 2.2 × 10^-3 | 3.6 × 10^-3 | 10^-4       | 9.9 × 10^-2 | 14.5
Bilinear n = 50 | Alpine (default) | 1.4 × 10^-2 | 1.7 × 10^-2 | 10^-4       | 6.9 × 10^-2 | 0.1
Bilinear n = 50 | Alpine+SP2       | 1.7 × 10^-4 | 1.2 × 10^-4 | 10^-4       | 5.4 × 10^-1 | 46.0
Bilinear n = 50 | Alpine+ML2       | 9.5 × 10^-4 | 9.4 × 10^-4 | 10^-4       | 4.9 × 10^-1 | 5.6
QCQP n = 10     | Alpine (default) | 1.3 × 10^-3 | 1.2 × 10^-3 | 7.5 × 10^-4 | 1.9 × 10^-2 | 0
QCQP n = 10     | Alpine+SP2       | 10^-4       | 10^-4       | 10^-4       | 10^-4       | 100
QCQP n = 10     | Alpine+ML2       | 3.0 × 10^-4 | 10^-4       | 10^-4       | 1.3 × 10^-1 | 71.8
QCQP n = 20     | Alpine (default) | 6.3 × 10^-2 | 7.8 × 10^-2 | 3.0 × 10^-3 | 2.1 × 10^-1 | 0
QCQP n = 20     | Alpine+SP2       | 2.1 × 10^-4 | 10^-4       | 10^-4       | 6.6 × 10^-3 | 52.2
QCQP n = 20     | Alpine+ML2       | 2.0 × 10^-3 | 2.5 × 10^-3 | 10^-4       | 5.8 × 10^-2 | 2.0
QCQP n = 20     | Alpine+SP4       | 1.1 × 10^-4 | 10^-4       | 10^-4       | 3.6 × 10^-3 | 92.6
QCQP n = 20     | Alpine+ML4       | 1.5 × 10^-3 | 1.7 × 10^-3 | 10^-4       | 6.7 × 10^-2 | 14.7
QCQP n = 50     | Alpine (default) | 8.1 × 10^-3 | 1.0 × 10^-2 | 6.3 × 10^-4 | 2.8 × 10^-2 | 0
QCQP n = 50     | Alpine+SP2       | 1.6 × 10^-4 | 1.3 × 10^-4 | 10^-4       | 1.0 × 10^-3 | 39.0
QCQP n = 50     | Alpine+ML2       | 4.8 × 10^-4 | 5.3 × 10^-4 | 10^-4       | 1.5 × 10^-2 | 14.9
Pooling         | Alpine (default) | 6.8 × 10^-3 | 6.4 × 10^-3 | 1.2 × 10^-3 | 4.4 × 10^-2 | 0
Pooling         | Alpine+SP2       | 2.4 × 10^-4 | 1.4 × 10^-4 | 10^-4       | 3.1 × 10^-3 | 45.2
Pooling         | Alpine+ML2       | 1.5 × 10^-3 | 1.6 × 10^-3 | 10^-4       | 6.3 × 10^-3 | 0.1

Table 6: (Effective Optimality Gaps) Statistics of effective optimality gaps (10) after one iteration (note: minimum possible value = 10^-4, the target gap). Columns record the geometric mean, median, minimum, and maximum effective gaps over 1000 instances. The last column is the percentage of instances for which each method results in the minimum possible effective optimality gap of 10^-4 after one iteration.
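For reference, the summary statistics in Table 6 and the gap reduction factors plotted in Figures 2-4 can be reproduced from per-instance gaps along the following lines. This is an illustrative sketch under assumed array names; the effective gap of equation (10) is approximated here by flooring the raw first-iteration gap at the 10^-4 target.

```python
import numpy as np

TARGET_GAP = 1e-4  # target optimality gap used as the floor in Table 6 (assumed)

def effective_gap(raw_gaps, target=TARGET_GAP):
    """Floor the first-iteration optimality gaps at the target value."""
    return np.maximum(np.asarray(raw_gaps, dtype=float), target)

def gap_reduction_factors(default_gaps, method_gaps):
    """Ratios shown in the Figure 2-4 histograms: default effective gap / method effective gap."""
    return effective_gap(default_gaps) / effective_gap(method_gaps)

def table6_row(raw_gaps, target=TARGET_GAP):
    """GM, median, min, max of effective gaps and % of instances with the gap closed."""
    g = effective_gap(raw_gaps, target)
    gm = float(np.exp(np.mean(np.log(g))))
    pct_closed = 100.0 * float(np.mean(np.isclose(g, target)))
    return gm, float(np.median(g)), float(g.min()), float(g.max()), pct_closed
```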
Table 5 implies Alpine+SP2 and Alpine+ML2 result in at least a 5× speedup over default Alpine on 45.7% and 11.5% of the instances, respectively. Table 6 implies Alpine+SP2 and Alpine+ML2 reduce the GM of default Alpine's effective optimality gap (10) after the first iteration by factors of 28 and 4.5, respectively. After the first iteration, Alpine+SP2 closes the effective optimality gap for 45.2% of the instances, whereas default Alpine is unable to close the gap for any of the 1000 instances. Finally, Alpine+SP2 and Alpine+ML2 result in maximum speedups of 120× and 41×.

In summary, Tables 4 to 6 and Figures 2 to 4 clearly show the benefits of strong partitioning and its ML approximation over Alpine's default partitioning strategy. They also demonstrate that Alpine+SP and Alpine+ML are able to match or even outperform (particularly on the pooling instances) the performance of the state-of-the-art solver BARON (with default options) on average over the different problem families. While our off-the-shelf ML model is able to yield a moderate approximation of SP across these different problem families, there is clear scope for significant improvement with tailored ML approaches.

Problem Family    Bilinear n = 50                                        Pooling
Method            Alpine (default)   Alpine+SP2    Alpine+ML2            Alpine (default)   Alpine+SP2    Alpine+ML2
TLE Gap (GM)      4.4 × 10^-4        1.6 × 10^-4   1.8 × 10^-4           2.9 × 10^-4        2.1 × 10^-4   2.8 × 10^-4

Table 7: (Effective TLE Optimality Gaps) Geometric mean of residual effective optimality gaps (target = 10^-4) on instances for which methods hit the time limit.

Problem Family     Solution Method   Max-Min Solution Time (seconds)
                                     Shifted GM   Median   Min    Max      Std. Dev.
Bilinear n = 10    SP2               16           14       6      96       13
Bilinear n = 20    SP2               528          445      136    2389     544
Bilinear n = 20    SP4               1244         1117     374    4360     893
Bilinear n = 50    SP2               7070         7404     1271   23166    3268
QCQP n = 10        SP2               8            8        6      53       3
QCQP n = 20        SP2               1731         1826     171    4244     654
QCQP n = 20        SP4               2152         2740     471    5965     961
QCQP n = 50        SP2               16964        17074    8626   23551    2319
Pooling            SP2               15658        15148    1088   77029    8657
Benchmark QCQPs    SP2               413          364      7      27907    4432
Benchmark QCQPs    SP4               895          651      12     136320   15444

Table 8: (Max-Min Solution Times) Statistics of max-min solution times. Columns correspond to the shifted geometric mean, median, minimum, maximum, and standard deviation of times for solving the max-min problem (SP).
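Table 8 reports a shifted geometric mean ("Shifted GM") of solution times. The exact definition and shift value used by the authors are not given in this excerpt; the snippet below is a minimal sketch of the standard computation, with an assumed shift of 10 seconds purely for illustration.

```python
import math

def shifted_geometric_mean(times, shift=10.0):
    """Standard shifted geometric mean of solution times (seconds).

    The shift damps the influence of very small times; shift=10.0 is an
    assumption for illustration, not necessarily the value used for Table 8.
    """
    logs = [math.log(t + shift) for t in times]
    return math.exp(sum(logs) / len(logs)) - shift

# Example with a few hypothetical max-min solution times:
print(round(shifted_geometric_mean([16.0, 14.0, 6.0, 96.0, 13.0]), 1))
```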
6 Future work

There are several interesting avenues for future work. First, instead of prespecifying the number of partitioning points per variable for SP, we could allocate a different number of partitions per variable based on their relative impact on the lower bound. Suppose we wish to specify at most d + 2 partitioning points for each variable and are given a budget B ∈ [d × n] for the total number of partitioning points across all variables (excluding variable bounds). We can solve the following max-min problem to determine both the optimal allocation of partitions and the optimal specification of partitioning points across the partitioned variables:

\[ \max_{(P, Z) \in \mathcal{P}_Z} v(P), \]

where $P := (p_1, p_2, \dots, p_n)$ denotes the (potential) partitioning points, $v(P)$ is defined in (PMR-OA), and $Z := (z_1, z_2, \dots, z_n)$ is a $d \times n$ matrix of binary decisions. The partitioning point $p_{ij}$ is added to the partition $\mathcal{P}_i$ of $x_i$ only if the variable $z_{ij}$ takes the value 1. Finally, the MILP-representable set $\mathcal{P}_Z$ is defined as

\[ \mathcal{P}_Z := \Bigl\{ (P, Z) \in \mathcal{P} \times \{0,1\}^{d \times n} : \sum_{i=1}^{n} \sum_{j=1}^{d} z_{ij} = B, \;\; z_{ij} = 0 \implies p_{ij} = 0, \;\; \forall (i,j) \in [n] \times [d] \Bigr\}. \]

If $z_{ij} = 0$, then the partitioning point $p_{ij}$ is made redundant by forcing it to 0. Note that the above outer-maximization problem involves binary decision variables Z, unlike the strong partitioning problem (SP), which necessitates new techniques for its solution.
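As a rough illustration of this first avenue, the sketch below encodes only the budget and linking constraints of the set P_Z using Pyomo. The modeling tool, the function name build_partition_allocation_set, and the rescaling of every partitioned variable to [0, 1] (so that the implication z_ij = 0 ⟹ p_ij = 0 can be written as p_ij ≤ z_ij) are all assumptions made for this sketch, and the outer value function v(P) from (PMR-OA) is not implemented here.

```python
import pyomo.environ as pyo

def build_partition_allocation_set(n, d, B):
    """Sketch of the MILP-representable set P_Z: choose which of the d
    candidate partitioning points of each of the n variables are active
    (z[i, j] = 1), subject to a total budget B, and force inactive points
    to 0 (assumed rescaling of each variable to [0, 1])."""
    m = pyo.ConcreteModel()
    m.I = pyo.RangeSet(1, n)   # partitioned variables
    m.J = pyo.RangeSet(1, d)   # candidate partitioning points per variable
    m.p = pyo.Var(m.I, m.J, bounds=(0.0, 1.0))     # partitioning points
    m.z = pyo.Var(m.I, m.J, domain=pyo.Binary)     # allocation decisions

    # Total number of active partitioning points equals the budget B.
    m.budget = pyo.Constraint(
        expr=sum(m.z[i, j] for i in m.I for j in m.J) == B)

    # Linking constraint: an inactive point is forced to 0 (made redundant).
    m.link = pyo.Constraint(
        m.I, m.J, rule=lambda m, i, j: m.p[i, j] <= m.z[i, j])

    # The outer objective would be v(P) from (PMR-OA); it is deliberately
    # omitted here because evaluating it requires the max-min machinery of (SP).
    return m

model = build_partition_allocation_set(n=5, d=3, B=6)
```

Attaching v(P) to this feasible set is precisely the part the text identifies as requiring new solution techniques, since the outer problem now contains binary decisions.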
Second, designing more efficient approaches for solving the strong partitioning problem (SP) would help scale our approach to larger problem dimensions (and also make it easier to generate more training data). Third, designing tailored ML architectures that can achieve similar speedups as strong partitioning and can accommodate variable feature and output dimensions merits investigation. Fourth, motivated by the cluster problem [34, 35], it would be interesting to explore variants that choose a different subset of variables to be partitioned at each iteration within Alpine. Finally, using strong partitioning to choose Alpine's partitions at the second iteration and beyond can help promote convergence of its bounds in fewer iterations.

Acknowledgments

The authors gratefully acknowledge funding from Los Alamos National Laboratory's (LANL's) "Center for Nonlinear Studies" and the U.S. Department of Energy's "Laboratory Directed Research and Development (LDRD)" program under the projects "20230091ER: Learning to Accelerate Global Solutions for Non-convex Optimization" and "20210078DR: The Optimization of Machine Learning: Imposing Requirements on Artificial Intelligence." This research used resources provided by LANL's Darwin testbed, which is funded by the Computational Systems and Software Environments subprogram of LANL's Advanced Simulation and Computing program (NNSA/DOE).
References

[1] T. Achterberg. Constraint integer programming. PhD thesis, Technische Universität Berlin, 2007.
[2] F. A. Al-Khayyal and J. E. Falk. Jointly constrained biconvex programming. Mathematics of Operations Research, 8(2):273–286, 1983.
[3] M. Alfaki and D. Haugland. A multi-commodity flow formulation for the generalized pooling problem. Journal of Global Optimization, 56(3):917–937, 2013.
[4] A. M. Alvarez, Q. Louveaux, and L. Wehenkel. A machine learning-based approximation of strong branching. INFORMS Journal on Computing, 29(1):185–195, 2017.
[5] M.-F. Balcan, T. Dick, T. Sandholm, and E. Vitercik. Learning to branch. In International Conference on Machine Learning, pages 344–353. PMLR, 2018.
[6] R. Baltean-Lugojan, P. Bonami, R. Misener, and A. Tramontani. Scoring positive semidefinite cutting planes for quadratic optimization via trained neural networks. Optimization Online, 2019. URL https://optimization-online.org/2018/11/6943/.
[7] X. Bao, N. V. Sahinidis, and M. Tawarmalani. Semidefinite relaxations for quadratically constrained quadratic programming: A review and comparisons. Mathematical Programming, 129(1):129–157, 2011.
[8] P. Belotti, J. Lee, L. Liberti, F. Margot, and A. Wächter. Branching and bounds tightening techniques for non-convex MINLP. Optimization Methods & Software, 24(4-5):597–634, 2009.
[9] Y. Bengio, A. Lodi, and A. Prouvost. Machine learning for combinatorial optimization: a methodological tour d'horizon. European Journal of Operational Research, 290(2):405–421, 2021.
[10] M. L. Bergamini, I. Grossmann, N. Scenna, and P. Aguirre. An improved piecewise outer-approximation algorithm for the global optimization of MINLP models involving concave and bilinear terms. Computers & Chemical Engineering, 32(3):477–493, 2008.
[11] K. Bestuzheva, M. Besançon, W.-K. Chen, A. Chmiela, T. Donkiewicz, J. van Doornmalen, L. Eifler, O. Gaul, G. Gamrath, A. Gleixner, et al. The SCIP optimization suite 8.0. arXiv preprint arXiv:2112.08872, 2021.
[12] D. Bienstock, M. Escobar, C. Gentile, and L. Liberti. Mathematical programming formulations for the alternating current optimal power flow problem. Annals of Operations Research, pages 1–39, 2022.
[13] A. Billionnet, S. Elloumi, and A. Lambert. Extending the QCR method to general mixed-integer programs. Mathematical Programming, 131(1):381–401, 2012.
[14] P. Bonami, A. Lodi, and G. Zarpellon. Learning a classification of mixed-integer quadratic programming problems. In International Conference on the Integration of Constraint Programming, Artificial Intelligence, and Operations Research, pages 595–604. Springer, 2018.
[15] L. Breiman, J. H. Friedman, R. A. Olshen, and C. J. Stone. Classification and regression trees. Routledge, 2017.
[16] S. Burer and D. Vandenbussche. A finite branch-and-bound algorithm for nonconvex quadratic programming via semidefinite relaxations. Mathematical Programming, 113(2):259–282, 2008.
[17] Q. Cappart, D. Chételat, E. Khalil, A. Lodi, C. Morris, and P. Veličković. Combinatorial optimization and reasoning with graph neural networks. arXiv preprint arXiv:2102.09544, 2021.
[18] P. M. Castro. Normalized multiparametric disaggregation: an efficient relaxation for mixed-integer bilinear problems. Journal of Global Optimization, 64(4):765–784, 2016.
[19] F. Cengil, H. Nagarajan, R. Bent, S. Eksioglu, and B. Eksioglu. Learning to accelerate globally optimal solutions to the AC optimal power flow problem. Electric Power Systems Research, 212:108275, 2022.
[20] A. Costa, P. Hansen, and L. Liberti. On the impact of symmetry-breaking constraints on spatial branch-and-bound for circle packing in a square. Discrete Applied Mathematics, 161(1-2):96–106, 2013.
[21] D. De Wolf and Y. Smeers. Generalized derivatives of the optimal value of a linear program with respect to matrix coefficients. European Journal of Operational Research, 291(2):491–496, 2021.
[22] G. Di Liberto, S. Kadioglu, K. Leo, and Y. Malitsky. DASH: Dynamic approach for switching heuristics. European Journal of Operational Research, 248(3):943–953, 2016.
[23] H. Drucker. Improving regressors using boosting techniques. In ICML, volume 97, pages 107–115. Citeseer, 1997.
[24] M. Etheve, Z. Alès, C. Bissuel, O. Juan, and S. Kedad-Sidhoum. Reinforcement learning for variable selection in a branch and bound algorithm. In International Conference on Integration of Constraint Programming, Artificial Intelligence, and Operations Research, pages 176–185. Springer, 2020.
[25] R. M. Freund. Postoptimal analysis of a linear program under simultaneous changes in matrix coefficients. In Mathematical Programming Essays in Honor of George B. Dantzig Part I, pages 1–13. Springer, 1985.
[26] Y. Freund and R. E. Schapire. A decision-theoretic generalization of on-line learning and an application to boosting. Journal of Computer and System Sciences, 55(1):119–139, 1997.
[27] M. Gasse, D. Chételat, N. Ferroni, L. Charlin, and A. Lodi. Exact combinatorial optimization with graph convolutional neural networks. Advances in Neural Information Processing Systems, 32, 2019.
[28] B. Ghaddar, I. Gómez-Casares, J. González-Díaz, B. González-Rodríguez, B. Pateiro-López, and S. Rodríguez-Ballesteros. Learning for spatial branching: An algorithm selection approach. arXiv preprint arXiv:2204.10834, 2022.
[29] B. González-Rodríguez, R. Alvite-Pazó, S. Alvite-Pazó, B. Ghaddar, and J. González-Díaz. Polynomial optimization: Enhancing RLT relaxations with conic constraints. arXiv preprint arXiv:2208.05608, 2022.
[30] C. A. Haverly. Studies of the behavior of recursion for the pooling problem. ACM SIGMAP Bulletin, 25:19–28, 1978.
[31] H. He, H. Daume III, and J. M. Eisner. Learning to search in branch and bound algorithms. Advances in Neural Information Processing Systems, 27:3293–3301, 2014.
[32] J. Im. Sensitivity analysis and robust optimization: A geometric approach for the special case of linear optimization. Master's thesis, University of Waterloo, 2018.
[33] R. Kannan. Algorithms, analysis and software for the global optimization of two-stage stochastic programs. PhD thesis, Massachusetts Institute of Technology, 2018.
[34] R. Kannan and P. I. Barton. The cluster problem in constrained global optimization. Journal of Global Optimization, 69(3):629–676, 2017.
[35] R. Kannan and P. I. Barton. Convergence-order analysis of branch-and-bound algorithms for constrained problems. Journal of Global Optimization, 71(4):753–813, 2018.
[36] E. Khalil, P. Le Bodic, L. Song, G. Nemhauser, and B. Dilkina. Learning to branch in mixed integer programming. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 30, 2016.
[37] J. Kim, J.-P. P. Richard, and M. Tawarmalani. Piecewise polyhedral relaxations of multilinear optimization. Optimization Online, 2022. URL http://www.optimization-online.org/DB_HTML/2022/07/8974.html.
[38] S. Kolodziej, P. M. Castro, and I. E. Grossmann. Global optimization of bilinear programs with a multiparametric disaggregation technique. Journal of Global Optimization, 57(4):1039–1063, 2013.
[39] T. C. Koopmans and M. Beckmann. Assignment problems and the location of economic activities. Econometrica: Journal of the Econometric Society, pages 53–76, 1957.
[40] J. Kotary, F. Fioretto, P. Van Hentenryck, and B. Wilder. End-to-end constrained optimization learning: A survey. arXiv preprint arXiv:2103.16378, 2021.
[41] M. Lee, N. Ma, G. Yu, and H. Dai. Accelerating generalized Benders decomposition for wireless resource allocation. IEEE Transactions on Wireless Communications, 2020.
[42] X. Li, E. Armagan, A. Tomasgard, and P. I. Barton. Stochastic pooling problem for natural gas production network design and operation under uncertainty. AIChE Journal, 57(8):2120–2135, 2011.
[43] Y. Lin and L. Schrage. The global solver in the LINDO API. Optimization Methods & Software, 24(4-5):657–668, 2009.
[44] J. Liu, N. Ploskas, and N. V. Sahinidis. Tuning BARON using derivative-free optimization algorithms. Journal of Global Optimization, 74(4):611–637, 2019.
[45] A. Lodi and G. Zarpellon. On learning and branching: a survey.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Top, 25(2):207–236, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [46] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Lu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Nagarajan, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Bent, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Eksioglu, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Mason.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Tight piecewise convex relaxations for global optimization of optimal power flow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' In 2018 Power Systems Computation Conference, pages 1–7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' IEEE, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [47] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Luedtke, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' d’Ambrosio, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Linderoth, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Schweiger.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Strong convex nonlinear relaxations of the pooling problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' SIAM Journal on Optimization, 30(2):1582–1609, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [48] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' M¨akel¨a.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Multiobjective proximal bundle method for nonconvex nonsmooth optimization: Fortran subroutine mpb- ngc 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' url: http://napsu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='karmitsa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='fi/publications/pbncgc_report.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Reports of the Department of Mathematical Information Technology, Series B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Scientific Computing, B, 13, 2003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [49] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' McCormick.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Computability of global solutions to factorable nonconvex programs: Part I—convex underestimating problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Mathematical Programming, 10(1):147–175, 1976.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [50] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Misener and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Floudas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' GloMIQO: Global mixed-integer quadratic optimizer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Journal of Global Optimization, 57 (1):3–50, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [51] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Misener and C.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Floudas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' ANTIGONE: algorithms for continuous/integer global optimization of nonlinear equations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Journal of Global Optimization, 59(2):503–526, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [52] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Nagarajan, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Lu, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Yamangil, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Bent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Tightening McCormick relaxations for nonlinear programs via dynamic multivariate partitioning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' In International conference on principles and practice of constraint programming, pages 369–387.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Springer, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [53] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Nagarajan, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Lu, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Wang, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Bent, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Sundar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' An adaptive, multivariate partitioning algorithm for global optimization of nonconvex programs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Journal of Global Optimization, 74(4):639–675, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [54] V.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Nair, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Bartunov, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Gimeno, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' von Glehn, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Lichocki, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Lobov, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' O’Donoghue, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Sonnerat, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Tjandraatmadja, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Wang, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Addanki, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Hapuarachchi, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Keck, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Keeling, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Kohli, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Ktena, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Li, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Vinyals, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Zwols.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Solving mixed integer programs using neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' arXiv preprint: 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='13349, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [55] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Nannicini, P.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Belotti, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Lee, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Linderoth, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Margot, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' W¨achter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' A probing algorithm for MINLP with failure prediction by SVM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' In International Conference on AI and OR Techniques in Constriant Programming for Combinatorial Optimization Problems, pages 154–169.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Springer, 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [56] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Nohra, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Raghunathan, and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Sahinidis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Spectral relaxations and branching strategies for global optimization of mixed-integer quadratic programs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' SIAM Journal on Optimization, 31(1):142–171, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [57] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Pedregosa, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Varoquaux, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Gramfort, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Michel, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Thirion, O.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Grisel, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Blondel, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Prettenhofer, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Weiss, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Dubourg, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Vanderplas, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Passos, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Cournapeau, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Brucher, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Perrot, and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Duchesnay.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Scikit-learn: Machine learning in Python.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Journal of Machine Learning Research, 12:2825–2830, 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [58] N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Sahinidis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' BARON: A general purpose global optimization software package.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Journal of global optimization, 8(2): 201–205, 1996.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [59] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Saif, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Elkamel, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Pritzker.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Global optimization of reverse osmosis network for wastewater treatment and minimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Industrial & Engineering Chemistry Research, 47(9):3060–3070, 2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [60] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Sherali and W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Adams.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' A reformulation-linearization technique for solving discrete and continuous nonconvex problems, volume 31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Springer Science & Business Media, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [61] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Still.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Lectures on parametric optimization: An introduction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Optimization Online, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' URL https://optimization-online.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='org/2018/04/6587/.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [62] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Sundar, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Nagarajan, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Linderoth, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Wang, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Bent.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Piecewise polyhedral formulations for a multilinear term.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Operations Research Letters, 49(1):144–149, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [63] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Vavasis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Quadratic programming is in NP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Information Processing Letters, 36(2):73–77, 1990.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [64] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' V´azquez, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content='-J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' R¨uckmann, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Stein, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Still.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Generalized semi-infinite programming: a tutorial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Journal of Computational and Applied Mathematics, 217(2):394–419, 2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [65] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Wicaksono and I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Karimi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Piecewise MILP under- and overestimators for global optimization of bilinear programs.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' AIChE Journal, 54(4):991–1008, 2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [66] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Yang and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Barton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Integrated crude selection and refinery optimization under uncertainty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' AIChE journal, 62(4): 1038–1053, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' [67] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Zarpellon, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Jo, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Lodi, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Bengio.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' Parameterizing branch-and-bound search trees to learn branching policies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_NAyT4oBgHgl3EQfdvce/content/2301.00306v1.pdf'} +page_content=' In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 3931–3939, 2021.' 
diff --git a/_NFJT4oBgHgl3EQfqSwP/vector_store/index.pkl b/_NFJT4oBgHgl3EQfqSwP/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..a3c0b34f884452b60eafa1b5958b0c63bfec001e
--- /dev/null
+++ b/_NFJT4oBgHgl3EQfqSwP/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b30141a7ec2eb0dd595584476be223771a25e706483968191b6e251d0e9635c4
+size 192824
diff --git a/_NFQT4oBgHgl3EQf7jbZ/content/2301.13443v1.pdf b/_NFQT4oBgHgl3EQf7jbZ/content/2301.13443v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..85a204d7dccb027658470e201b2951f0c2f50eb7
--- /dev/null
+++ b/_NFQT4oBgHgl3EQf7jbZ/content/2301.13443v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1a2db8d99ee917542f8df51b03df68d909c94fae0be8315c8502644c0e43905
+size 1396599
diff --git a/_NFQT4oBgHgl3EQf7jbZ/vector_store/index.pkl b/_NFQT4oBgHgl3EQf7jbZ/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..fb20beb11f61a924839570de0b2ef4cc5488f31c
--- /dev/null
+++ b/_NFQT4oBgHgl3EQf7jbZ/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:345af511605b5a4e977687d3f518eab5221ab08ccdfedf8f02d63f8f3d6b42cb
+size 199990
diff --git a/_dE1T4oBgHgl3EQfogTY/vector_store/index.faiss b/_dE1T4oBgHgl3EQfogTY/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..5f2d85aa9367ec6a1847f7578db55c7b7d28dfcd
--- /dev/null
+++ b/_dE1T4oBgHgl3EQfogTY/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86e99f2c1a28725430b589b7e2013885a27d74fae46f38324aac9be1f66be9af
+size 5046317
diff --git a/_tFQT4oBgHgl3EQfLjUE/content/tmp_files/2301.13264v1.pdf.txt b/_tFQT4oBgHgl3EQfLjUE/content/tmp_files/2301.13264v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f7e13d0825411000d09248c8f6c01bd68b6f396c
--- /dev/null
+++ b/_tFQT4oBgHgl3EQfLjUE/content/tmp_files/2301.13264v1.pdf.txt
@@ -0,0 +1,986 @@

Communication: Non-adiabatic derivative coupling elements for the coupled cluster singles and doubles model

Eirik F. Kjønstad^{1,2,a)} and Henrik Koch^{2,3}
1) Department of Chemistry and The PULSE Institute, Stanford University, Stanford, California 94305, USA
2) Department of Chemistry, Norwegian University of Science and Technology, 7491 Trondheim, Norway
3) Scuola Normale Superiore, Piazza dei Cavalieri 7, 56126 Pisa, Italy
a) Electronic mail: eirik.kjonstad@ntnu.no
(Dated: 1 February 2023)

We present an efficient implementation of analytical non-adiabatic derivative coupling elements for the coupled cluster singles and doubles model. The derivative coupling elements are evaluated in a biorthonormal formulation in which the nuclear derivative acts on the right electronic state, where this state is biorthonormal with respect to the set of left states. This stands in contrast to earlier implementations based on normalized states and a gradient formula for the derivative coupling. As an illustration of the implementation, we determine a minimum energy conical intersection between the nπ∗ and ππ∗ states in the nucleobase thymine.
I. INTRODUCTION

The nuclear dynamics that follows photoexcitation typically involves non-adiabatic population transfer between several electronic states. For example, in the nucleobase thymine, photoexcitation to the bright ππ∗ state is followed by rapid (60 fs) non-adiabatic population transfer to the dark nπ∗ state.1 As is well known, the approximate description of the electronic structure can have a dramatic qualitative impact on the simulated nuclear dynamics, often complicating the task of correctly identifying the actual physics behind the processes observed in pump-probe experiments.2,3 A recent example is the ongoing debate about the dynamics that follows excitation to the bright B3u state in pyrazine.4–8 The ambiguities involved in interpreting time-resolved spectra illustrate the need for a highly accurate description of the electronic structure.

A number of electronic structure methods have a long history of being applied to treat non-adiabatic effects, including complete active space9 (CAS) methods, density functional theory10 (DFT), and algebraic diagrammatic construction11 (ADC). These methods are often complementary: some can describe static correlation in the ground state and intersections with the ground state (CAS), while others better capture dynamical correlation but cannot treat static correlation in the ground state or actual crossings with the ground state (DFT, ADC). In the latter category, there is still a need for a method with systematically improvable accuracy that extends beyond a perturbative description of double excitations.

Coupled cluster theory is now well established as the method of choice whenever this level of accuracy is required and the ground state is accurately described by a single determinant. However, initial progress towards its use in non-adiabatic dynamics simulations was slowed by the realization12,13 that the method produces non-physical results at electronic degeneracies when the states that cross span the same symmetry. Later work by the present authors and collaborators showed that these artifacts were caused by the loss of electronic state orthogonality (matrix defects)14 and that they could be fully removed by enforcing orthogonality relations between the electronic states.15,16 Our current understanding is that coupled cluster methods can describe conical intersections when the states span different symmetries, but that corrections13,15,16 are required when the states span the same symmetry. These conclusions are, however, based on studies of the potential energy surfaces and not on considerations of the predicted physics. It remains an open question to what extent the artifacts at same-symmetry intersections negatively affect the predicted dynamics in trajectory-based simulation methods such as surface hopping17 and ab initio multiple spawning.18

Already in 1999, Christiansen19 derived expressions for the derivative coupling elements in coupled cluster theory, but the first implementation was given later by Tajti and Szalay20 at the singles and doubles (CCSD) level. These authors did not, however, implement the expressions of Ref. 19. Instead, the coupling was evaluated from the gradients of the two states as well as the gradient of a fictitious summed state; this summed-state approach was also used in a more recent implementation of the CCSD coupling elements.21 In addition, they proposed modifications to account for the fact that the coupled cluster states are not normalized, building on earlier work by Gauss and coworkers,22 who had found that normalization is important when evaluating the diagonal Born-Oppenheimer correction to the energy. The need for normalization in dynamics, which is not trivial to achieve, was later questioned by Shamasundar.23 In a recent publication, we confirmed this by showing that a biorthonormal formalism exists in which there is no dependence on the norm of the electronic states.24

In the present work, we provide a derivation (equivalent to that of Ref. 19) and an implementation, at the CCSD level of theory, of the derivative coupling between ground and excited states as well as between excited states. The derivation follows the Lagrangian approach for the derivative coupling proposed by Hohenstein in the context of CAS configuration interaction (CASCI),25 while the present implementation builds on an efficient implementation of analytical gradients, exploiting Cholesky-decomposed electron repulsion integrals, recently published by the authors and collaborators.26

II. THEORY

A. Lagrangian

The derivative coupling between states i and j is19,24

    F_{ij} = ⟨ψ^L_i | ∇ψ^R_j⟩,   i, j = 0, 1, 2, . . . ,   (1)

where L and R signify that these are the left and right electronic states, and the gradient ∇ is taken with respect to the coordinates of the atomic nuclei.

Analytical expressions for F_{ij} may be derived using the Lagrangian technique. Here, we use the Lagrangian proposed by Hohenstein.25 For the coupled cluster case, this Lagrangian can be expressed as24

    L_{ij} = O_{ij} + conditions,   (2)

where

    O_{ij} = ⟨ψ^L_i(x_0) | ψ^R_j(x)⟩.   (3)

Here we have made the dependence on the nuclear geometry explicit: x_0 is the geometry where the derivative is to be evaluated, while x is allowed to vary. Upon differentiating L_{ij}, the derivative operation ∇ acts only on the ket vector. As a result, the derivative of L_{ij} at x_0 is identical to F_{ij} at x_0.24,25

The conditions in L_{ij} are those required to specify the right state ψ^R_j for all values of x: the Hartree-Fock equations, which determine the orbitals; the amplitude equations, which determine the ground state cluster amplitudes; and the excited state eigenvalue equations, which determine the excited state amplitudes. Written out in detail, the Lagrangian reads

    L_{ij} = O_{ij} + Σ_µ ζ̄_µ ⟨µ| H̄ |HF⟩ + Σ_µ γ̄_µ ( ⟨µ| [H̄, R_j] |HF⟩ − ω_j R^j_µ ) + ξ̄ (1 − ⟨L_j | R_j⟩) + Σ_{ai} κ̄_{ai} F_{ai},   (4)

where we have suppressed the dependence on x_0 for notational convenience.

This expression for L_{ij} introduces various quantities. The coupled cluster conditions are expressed in terms of the similarity-transformed Hamiltonian

    H̄ = exp(−T) exp(κ) H exp(−κ) exp(T),   (5)

where we have introduced the orbital rotation operator

    κ = Σ_{ai} κ_{ai} E⁻_{ai},   E⁻_{ai} = E_{ai} − E_{ia},   (6)

as well as the cluster operator

    T = Σ_µ t_µ τ_µ.   (7)

The scalars t_µ are known as cluster amplitudes, and the τ_µ denote excitation operators. The E_{ai} are singlet one-electron excitation operators and the E_{ia} are the corresponding de-excitation operators. Here, κ(x_0) = 0 by assumption.
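To make the structure of Eqs. (1)-(3) concrete before introducing the stationarity conditions, the following minimal Python sketch checks the analogous relation for a small non-symmetric model matrix: when the right eigenvector is kept biorthonormal against the left eigenvectors at x_0, the directly differentiated coupling ⟨L_i | dR_j/dx⟩ coincides with the first-order perturbation expression L_i^T (dH/dx) R_j / (E_j − E_i), the finite-dimensional analogue of the first term of Eq. (26) below. This is not part of the eT implementation; the 3×3 matrix and all parameter values are hypothetical and chosen to have well-separated real eigenvalues.

    import numpy as np

    # Hypothetical smooth, non-symmetric 3x3 model "Hamiltonian" H(x); not a CC quantity.
    def H(x):
        return np.array([[1.0 + 0.5 * x, 0.20 * x,      0.10],
                         [0.30,          2.0 - 0.4 * x, 0.20 * x],
                         [0.10 * x,      0.20,          3.0 + 0.1 * x]])

    x0, h, i, j = 0.7, 1.0e-5, 0, 1

    # Right eigenvectors as columns of R; left eigenvectors as rows of inv(R),
    # so that left and right states are biorthonormal: L @ R = identity.
    E, R = np.linalg.eig(H(x0))
    order = np.argsort(E.real)
    E, R = E.real[order], R.real[:, order]
    L = np.linalg.inv(R)

    def R_j(x):
        # Right eigenvector j at x, scaled so that <L_j(x0) | R_j(x)> = 1,
        # i.e., biorthonormal with respect to the left state at x0.
        e, r = np.linalg.eig(H(x))
        rj = r.real[:, np.argsort(e.real)[j]]
        return rj / (L[j] @ rj)

    # Direct evaluation of <L_i | dR_j/dx> by central differences (cf. Eq. (1))
    F_direct = L[i] @ (R_j(x0 + h) - R_j(x0 - h)) / (2.0 * h)

    # Hellmann-Feynman-like expression, analogous to the first term of Eq. (26)
    Hq = (H(x0 + h) - H(x0 - h)) / (2.0 * h)
    F_formula = L[i] @ Hq @ R[:, j] / (E[j] - E[i])

    print(F_direct, F_formula)  # the two values agree to the finite-difference error

Because the off-diagonal coupling is independent of how the right eigenvector is scaled, this biorthonormal fixing removes the normalization ambiguity, which is the same observation that motivates the formulation used in this work.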
The electronic states are conveniently expressed as

    |ψ^R_k⟩ = R_k exp(T) |HF⟩   (8)
    ⟨ψ^L_k| = ⟨HF| L_k exp(−T),   (9)

where

    R_k = R^k_0 + R_k = R^k_0 + Σ_µ R^k_µ τ_µ   (10)
    L_k = L^k_0 + L_k = L^k_0 + Σ_µ L^k_µ τ†_µ.   (11)

We will also find it useful to write

    |R_k⟩ = R^k_0 |HF⟩ + |R_k⟩   (12)
    ⟨L_k| = ⟨HF| L^k_0 + ⟨L_k|.   (13)

Furthermore, we have let

    ω_k = ⟨L_k| [H̄, R_k] |HF⟩   (14)

and defined the Fock matrix as

    F_pq = h_pq + Σ_k (2 g_pkkq − g_pqkk).   (15)

Here, h_pq and g_pqrs are the one- and two-electron integrals of the Hamiltonian. Following conventional notation, we let p, q, r, and s denote generic orbitals; i, j, k, and l occupied orbitals; and a, b, c, and d virtual orbitals. Lagrangian multipliers are denoted with a bar (ζ̄_µ, κ̄_ai, ξ̄).

The left-state quantities in L_{ij}, that is, ψ^L_i and L_j, are constants that define L_{ij}; they are evaluated at x_0. The Lagrangian's dependencies are thus understood as

    L_{ij} = L_{ij}(x, t, R_j, κ, ζ̄, ξ̄, κ̄; x_0),   (16)

where the semicolon denotes that L_{ij} depends only parametrically on x_0.

B. Lagrangian stationarity conditions

The derivative coupling becomes the partial derivative of L_{ij} when the Lagrangian is stationary with respect to all variables and multipliers that depend implicitly on x. We begin by considering stationarity with respect to R_j:

    ∂L_{ij}/∂R^j_σ = L^i_σ + Σ_µ γ̄_µ A_µσ − ω_j γ̄_σ − (Σ_ν L^j_ν A_νσ)(Σ_µ γ̄_µ R^j_µ) − ξ̄ L^j_σ = 0,   (17)

where

    A_µν = ⟨µ| [H̄, τ_ν] |HF⟩.   (18)

In vector notation, this condition reads

    0 = L_i^T + γ̄^T (A − ω_j) − (ω_j γ̄^T R_j + ξ̄) L_j^T.   (19)

Clearly, with ξ̄ = −ω_j γ̄^T R_j, the last term vanishes, and we obtain stationarity provided that

    γ̄^T = L_i^T / (ω_j − ω_i).   (20)

We thus see that the excited state multipliers (ξ̄, γ̄) can be expressed in terms of the excited states (L_i, R_j) and the associated excitation energies (ω_i, ω_j).

Stationarity with respect to t yields

    0 = tη^T + ζ̄^T A,   (21)

where

    tη_σ = ⟨L_i| τ_σ |R_j⟩ + (F(γ̄) R_j)_σ,   (22)

with the well-known27 F-matrix defined as

    F(γ̄)_µν = ⟨γ̄| [[H̄, τ_µ], τ_ν] |HF⟩,   ⟨γ̄| = Σ_σ γ̄_σ ⟨σ|.   (23)

Similarly, stationarity with respect to κ yields

    0 = κη^T + κ̄^T A^HF,   (24)

where

    κη_ai = ⟨L_i| E⁻_ai |R_j⟩ + ⟨ζ̄| [E⁻_ai, H̄] |HF⟩ + ⟨γ̄| [[E⁻_ai, H̄], R_j] |HF⟩,   (25)

and where A^HF is the Hartree-Fock Hessian. The amplitude and orbital conditions, Eqs. (21) and (24), are solved numerically for ζ̄ and κ̄.
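As a rough illustration of how Eqs. (20), (21), and (24) determine the multipliers, the Python sketch below solves the two linear systems with dense placeholder arrays. It is only schematic: in the actual CCSD implementation the Jacobian A and the Hartree-Fock Hessian A^HF are never built explicitly and the equations are solved iteratively, and all array sizes and values here are hypothetical.

    import numpy as np

    n_amp, n_orb = 8, 4                      # hypothetical numbers of amplitudes / orbital rotations
    rng = np.random.default_rng(0)

    A     = rng.normal(size=(n_amp, n_amp))  # stand-in for the Jacobian A_{mu,nu}, Eq. (18)
    A_HF  = rng.normal(size=(n_orb, n_orb))  # stand-in for the Hartree-Fock Hessian
    L_i   = rng.normal(size=n_amp)           # left excited state i
    R_j   = rng.normal(size=n_amp)           # right excited state j
    eta_t = rng.normal(size=n_amp)           # stand-in for the tη vector of Eq. (22)
    eta_k = rng.normal(size=n_orb)           # stand-in for the κη vector of Eq. (25)
    w_i, w_j = 0.31, 0.45                    # excitation energies

    # Eq. (20): excited-state multipliers in closed form
    gamma_bar = L_i / (w_j - w_i)
    # Choice made below Eq. (19): xi-bar removes the term proportional to L_j
    xi_bar = -w_j * (gamma_bar @ R_j)

    # Eq. (21): 0 = tη^T + ζ̄^T A   =>   A^T ζ̄ = -tη
    zeta_bar = np.linalg.solve(A.T, -eta_t)

    # Eq. (24): 0 = κη^T + κ̄^T A^HF   =>   (A^HF)^T κ̄ = -κη
    kappa_bar = np.linalg.solve(A_HF.T, -eta_k)

The closed-form expressions for γ̄ and ξ̄ mean that only the ζ̄ and κ̄ systems require iterative solution in practice.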
C. Derivative coupling elements

Once ζ̄ and κ̄ are known, we can evaluate the coupling by taking the partial derivative of L_{ij} with respect to the nuclear coordinates {x_q}. This yields19,24

    F^q_{ij} = ⟨L_i| [H̄^q, R_j] |HF⟩ / (ω_j − ω_i) + ⟨ζ̄| H̄^q |HF⟩ + κ̄_{ai} F^q_{ai},   (26)

where

    H̄^q = exp(−T) H^q exp(T).   (27)

Here H^q denotes the partial derivative of H with respect to the qth nuclear coordinate, x_q. By expanding the commutator in Eq. (26), we obtain the equivalent expression

    F^q_{ij} = ⟨L_i| H̄^q |R_j⟩ / (ω_j − ω_i) + ⟨ζ̃| H̄^q |HF⟩ + κ̄_{ai} F^q_{ai},   (28)

where

    ζ̃ = ζ̄ − J,   J_µ = ⟨L_i| R_j |µ⟩ / (ω_j − ω_i) = j_µ / (ω_j − ω_i).   (29)

Clearly, F_{ij} is the sum of an excited state gradient and a ground state gradient, plus an orbital relaxation term. The expression in Eq. (28) is convenient when invoking an existing molecular gradient code.

So far we have assumed that the right state ψ^R_j is an excited state. This raises the question of how to evaluate the coupling when ψ^R_j is the ground state (j = 0). In this case, the excited state condition in L_{ij} can be removed. As a result, the t stationarity simplifies to

    0 = L_i^T + ζ̄^T A,   (30)

so that

    ζ̄^T = −L_i^T / ω_i = L_i^T / (E_0 − E_i),   (31)

where E_k denotes the electronic energy of the kth state. The orbital multiplier equation is also simplified by the removal of ⟨γ̄|, but it must still be solved numerically. Once κ̄ is known, we can evaluate F_{i0} as

    F^q_{i0} = ⟨L_i| H̄^q |HF⟩ / (E_0 − E_i) + κ̄_{ai} F^q_{ai}.   (32)

D. Significance of orbital connections

Hamiltonian derivatives are treated in the same way as for molecular energy gradients. That is, we take H to be expressed, for all x, in a non-unique orthonormal MO (OMO) basis defined by an orbital connection.28 Any orbital connection can be used, but the choice may affect the expression for the derivative coupling. In fact, as we explain below, the formula in Eq. (26) is only correct when we use the natural connection.19,24,28 For other connections, such as the widely used symmetric connection, the partial derivative of O_{ij} is non-zero and must be added to the expression for F_{ij}.25

To show this, we express the derivative of O_{ij} in terms of the orbital connection. Given a connection matrix T, we define the OMOs as

    ψ_p = Σ_q T_pq φ_q,   (33)

where the unmodified MOs (UMOs) are given by

    φ_q = Σ_α C_αq(x_0) χ_α(x).   (34)

Here, {C_αq} denotes the MO coefficients and {χ_α} the atomic orbitals. The UMOs are, in general, orthonormal only at x_0, that is,

    S_rs = ⟨φ_r | φ_s⟩ ≠ δ_rs,   x ≠ x_0.   (35)

This is, of course, why an orbital connection is required in the first place; the derivative is most easily evaluated consistently in a Fock space defined by an orbital basis that is orthonormal for all values of x.

Now, the derivative of O_{ij} can be written28

    O^q_{ij} = ∂O_{ij}/∂x_q |_0 = Σ_rs D^{ij}_rs Y^q_rs,   (36)

where D^{ij} is the transition state density at x_0, and

    Y^q_rs = ⟨ψ_r | ∂ψ_s/∂x_q⟩|_0.   (37)

For the natural connection, we have, by construction,28

    Y^q_rs = 0,   (38)

and so we can conclude that19,28

    O^q_{ij} = 0.   (39)

Next, let us consider the symmetric connection. In this case, T = S^{−1/2}, which implies that

    ∂T_rs/∂x_q = −(1/2) ∂S_rs/∂x_q |_0 = −(1/2)(W^q_rs + W^q_sr),   (40)

where

    W^q_rs = ⟨φ_r | ∂φ_s/∂x_q⟩|_0.   (41)

Consequently,

    Y^q_rs = W^q_rs − (1/2)(W^q_rs + W^q_sr) = (1/2)(W^q_rs − W^q_sr),   (42)

and so

    O^q_{ij} = Σ_rs D^{ij}_rs (1/2)(W^q_rs − W^q_sr) = Σ_rs (1/2)(D^{ij}_rs − D^{ij}_sr) W^q_rs.   (43)

For the symmetric connection, therefore, the derivative of O_{ij} equals the anti-symmetrized density matrix contracted with a ket-derivative of an overlap matrix.25 This overlap derivative is evaluated as

    W^q_rs = Σ_αβ C_αr C_βs ⟨χ_α | ∂χ_β/∂x_q⟩|_0.   (44)

For the natural connection, W^q is of course not needed for O^q_{ij} (which is zero). However, W^q is required for the reorthonormalization terms associated with the Hamiltonian; for the natural connection, the ket-derivative W^q plays the same role that the braket-derivative S^q does for the symmetric connection.28 These reorthonormalization terms are the same for derivative couplings and molecular energy gradients, so we refer the reader to the literature for more details.26

E. Relation to previous implementations

In the literature, the derivative coupling has been implemented through a summed-state formula,20,21 which is closely related to the one presented in this work. However, we have not been able to show that the two formulations are equivalent, except in the FCI limit. As we will see, our values for the coupling deviate to some extent from the values presented by Tajti and Szalay for the LiH molecule.20
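As a small illustration of Eqs. (43) and (44), the Python sketch below assembles W^q from mixed AO overlaps by central differences and contracts it with the anti-symmetrized transition density. The function mixed_ao_overlap, assumed to return ⟨χ_α(x_bra) | χ_β(x_ket)⟩, is a hypothetical stand-in for the AO overlap evaluation used in the implementation of Sec. III A; all array shapes are placeholders.

    import numpy as np

    def W_q(C, mixed_ao_overlap, x0, q, h=1.0e-4):
        # Ket overlap derivative of Eq. (44): the bra AOs are frozen at x0 while
        # the ket AOs are displaced along nuclear coordinate q (central differences).
        xp, xm = x0.copy(), x0.copy()
        xp[q] += h
        xm[q] -= h
        dS_ket = (mixed_ao_overlap(x0, xp) - mixed_ao_overlap(x0, xm)) / (2.0 * h)
        return C.T @ dS_ket @ C   # transform to the MO basis: W^q_rs

    def O_q_symmetric(D_ij, Wq):
        # Symmetric-connection correction of Eq. (43); it vanishes for the
        # natural connection, where Y^q_rs = 0 by construction.
        return np.einsum('rs,rs->', 0.5 * (D_ij - D_ij.T), Wq)

The same W^q matrix also enters the reorthonormalization terms of the Hamiltonian derivative discussed above, which is why it is computed even when the natural connection is used.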
How- +ever, we have not been able to show that the two formu- +lations are equivalent, except in the FCI limit. As we will +see, our values for the coupling deviates to some extent +from the values presented by Tajti and Szalay for the LiH +molecule.20 +III. +IMPLEMENTATION +A. +Evaluation of the derivative coupling +The derivative coupling has been implemented in a de- +velopment version of the eT program.29 The implemen- +tation builds on the recent implementation by Schnack- +Petersen et al.26 for ground and excited state molecular +gradients. Our implementation uses existing routines for +molecular gradients and two-electron densities,26 as well +as several other quantities already implemented in the eT +program,29 such as the F-matrix (F(¯γ)), the Hartree- +Fock Hessian (AHF), and the second and third terms of +κη. We apply central differences to obtain W q numer- +ically, exploiting Libint 230 to evaluate the AO overlap +integrals. +We have implemented the first term in the tη vector +and in the κη vector, that is, the terms that arise when +differentiating Oij with respect to t and κ. In the case +of CCSD, tη can be expressed as +tη +1 +ai = Li +aiRj +0 + jai += Li +aiRj +0 + +� +bj +Li +bjRj +bjai = Dij +ai +(45) +tη +1 +aibj = Li +aibjRj +0, +(46) +where Dij is the one-electron transition density. Finally: +κη1 +ai = Dij +ai − Dij +ia. +(47) +We use an existing implementation to obtain the transi- +tion density Dij.29 +Finally, we have implemented the normalization factor +N L +j , since this allows us to validate our implementation +by comparison to the exact limit. Programmable expres- +sions for this quantity can be found elsewhere.20 + +5 +FIG. 1. LiH/cc-pVQZ derivative coupling calculated for CCSD and FCI. For CCSD, we present couplings both for the direct +evaluation of the coupling (this work) and for the summed-gradient values in Ref. 20. +FIG. 2. Branching plane for CCSD/aug-cc-pVDZ conical intersection in H2S (11A2/11B1). We depict the relative electronic +energies (left) and the norm of the coupling vector (right). +B. +Optimization of minimum energy conical intersections +As numerical illustrations of the new implementation, +we have applied Bearpark et al.’s algorithm for deter- +mining minimum energy conical intersections (MECIs), +where a gradient is constructed so that it is zero when +two conditions are fulfilled: the energy difference van- +ishes and the energy gradient along the seam is zero.31 +In particular, we minimize the gradient +G = P∇E2 + 2(E2 − E1) g +||g||, +(48) +where +g = ∇(E2 − E1) +(49) +and where P is the projection onto the complement of +the g-h plane. The h vector is +h = (E2 − E1)F12. +(50) +The gradient G is used in combination with a Broyden- +Fletcher-Goldfarb-Shanno (BFGS) solver already imple- +mented in eT for geometry optimizations.26 + +0.020 +0.6 +CCSD (this work) +CCSD (this work) +CCSD (Szalay and Tajti, 2009) +CCSD (Szalay and Tajti, 2009) +0.015 +FCI +0.010 +0.4 +0.005 +from +0.2 +0.000 +Deviation +-0.005 +0.0 +-0.010 +-0.015 +-0.2 +-0.020 +2 +3 +4 +5 +6 +7 +8 +9 +2 +3 +4 +5 +6 +7 +8 +9 +Li-H bond length (angstrom) +Li-H bond length (angstrom)300 +0.005 + (Hartree) +200 +Energy +0.000 +100 +-0.005 +0 +0.03 +0.03 +0.04 +0.04 +0.00 +0.00 +0.00 +0.00 +h +h +0.04 +0.03 +0.04 +-0.03 +9 +96 +FIG. 3. Branching plane for CCSD/aug-cc-pVDZ conical intersections in HOF (11A′′/21A′′). We depict the real part of relative +electronic energies (left) and the norm of the coupling vector (right). +FIG. 4. 
FIG. 1. LiH/cc-pVQZ derivative coupling calculated with CCSD and FCI. For CCSD, we present couplings both from the direct evaluation of the coupling (this work) and from the summed-gradient values of Ref. 20.

FIG. 2. Branching plane for the CCSD/aug-cc-pVDZ conical intersection in H2S (1¹A₂/1¹B₁). We depict the relative electronic energies (left) and the norm of the coupling vector (right).

B. Optimization of minimum energy conical intersections

As numerical illustrations of the new implementation, we have applied the algorithm of Bearpark et al. for determining minimum energy conical intersections (MECIs), in which a composite gradient is constructed so that it vanishes when two conditions are fulfilled: the energy difference is zero and the energy gradient along the intersection seam is zero.31 In particular, we minimize using the gradient

G = P \nabla E_2 + 2 (E_2 - E_1) \frac{g}{\lVert g \rVert},    (48)

where

g = \nabla (E_2 - E_1)    (49)

and where P is the projection onto the complement of the g-h (branching) plane. The h vector is

h = (E_2 - E_1) F_{12}.    (50)

The gradient G is used in combination with a Broyden-Fletcher-Goldfarb-Shanno (BFGS) solver already implemented in eT for geometry optimizations.26
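For concreteness, the composite gradient of Eqs. (48)-(50) can be assembled from the two state energies, their nuclear gradients, and the derivative coupling vector. The Python sketch below is illustrative only; in particular, the construction of the projector P from an orthonormalized {g, h} pair is our assumption and is not spelled out in the text, and the function is not part of the eT implementation.

import numpy as np

def bearpark_gradient(E1, E2, grad_E1, grad_E2, F12):
    """Composite gradient G of Eq. (48) for locating an MECI.

    E1, E2           : adiabatic energies of the lower and upper state.
    grad_E1, grad_E2 : their nuclear gradients, shape (3N,).
    F12              : derivative coupling vector, shape (3N,).
    """
    g = grad_E2 - grad_E1                    # Eq. (49)
    h = (E2 - E1) * F12                      # Eq. (50)
    # Project out the branching (g-h) plane; orthonormalizing {g, h} is an
    # assumed way of building P.
    u = g / np.linalg.norm(g)
    h_perp = h - (h @ u) * u
    basis = [u]
    if np.linalg.norm(h_perp) > 1e-12:
        basis.append(h_perp / np.linalg.norm(h_perp))
    P = np.eye(g.size) - sum(np.outer(v, v) for v in basis)
    return P @ grad_E2 + 2.0 * (E2 - E1) * g / np.linalg.norm(g)   # Eq. (48)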
FIG. 3. Branching plane for the CCSD/aug-cc-pVDZ conical intersection in HOF (1¹A′′/2¹A′′). We depict the real part of the relative electronic energies (left) and the norm of the coupling vector (right).

FIG. 4. Branching plane for the CCSD/cc-pVDZ Cs-constrained minimum energy conical intersection in thymine (nπ∗/ππ∗). We depict the relative electronic energies (left) and the norm of the coupling vector (right).

IV. NUMERICAL EXAMPLES

A. Comparison to earlier implementation: LiH

In Figure 1, we show the derivative coupling element for the LiH system as a function of the Li-H bond distance, computed with three methods: CCSD using the direct formula (present work), CCSD using the summed-state formula (numbers taken from Tajti and Szalay20), and exact FCI derivative couplings (obtained with OpenMolcas32). All calculations are performed with the cc-pVQZ basis of Dunning.33

All three methods agree closely for all bond distances. However, there is a slight deviation between our results and those given in Ref. 20; see Figure 1 (right). This may be caused both by insufficient numerical convergence (as indicated by the uneven deviation from FCI) and by differences in the analytical derivative couplings, as noted in Section II E.

In order to ensure a consistent comparison to FCI, where the states are normalized by default, we approximate the coupling from normalized coupled cluster states, averaging over the left and right coupling elements:

\bar{F}^{\mathrm{norm}}_{ij} = \frac{1}{2}\big( F^{\mathrm{norm}}_{ij} - F^{\mathrm{norm}}_{ji} \big)
 = \frac{1}{2}\big( \langle N^L_i \psi^L_i | \nabla N^R_j \psi^R_j \rangle - \langle N^L_j \psi^L_j | \nabla N^R_i \psi^R_i \rangle \big)
 = \frac{1}{2}\big( N^L_i N^R_j F_{12} - N^L_j N^R_i F_{21} \big)
 \approx \frac{1}{2}\big( N^L_i (N^L_j)^{-1} F_{12} - N^L_j (N^L_i)^{-1} F_{21} \big).    (51)

Recall that this normalization procedure is only required when we compare to methods with normalized states.
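As a small illustration, Eq. (51) amounts to scaling and antisymmetrizing the two biorthonormal coupling vectors. The Python helper below is a sketch under the approximation N^R ≈ (N^L)^{-1} used above; the normalization factors themselves are obtained from the programmable expressions of Ref. 20, which are not reproduced here.

def normalized_coupling(F12, F21, NL_i, NL_j):
    """Symmetrized, approximately normalized coupling of Eq. (51).

    F12, F21   : biorthonormal coupling vectors between the two states.
    NL_i, NL_j : left-state normalization factors; the right-state factors
                 are approximated by their reciprocals, as in Eq. (51).
    """
    return 0.5 * (NL_i / NL_j * F12 - NL_j / NL_i * F21)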
B. Branching planes in triatomic systems: SH2, HOF

To provide some indication of the behavior of the coupling in the vicinity of conical intersections, we have calculated branching planes for points of intersection in SH2 (1¹A₂/1¹B₁) and HOF (1¹A′′/2¹A′′); see Figures 2 and 3, respectively. As expected, we find a divergence at the point of intersection in SH2 and no visible artifacts. This is consistent with the fact that this is an intersection between states spanning different symmetries.14 The HOF intersection, on the other hand, is defective because the states have the same symmetry. Note that the coupling still diverges as one approaches the defect.

C. Minimum energy conical intersection: thymine

Finally, we have applied the optimization algorithm described in Section III B to locate the nπ∗/ππ∗ minimum energy conical intersection in thymine, restricted to nuclear geometries with Cs symmetry; see Figure 4. In this calculation, we have used the cc-pVDZ basis. As for SH2, this is a different-symmetry intersection, and there is no sign of non-physical artifacts.

V. SUMMARY AND OUTLOOK

In this work we have presented an efficient implementation of derivative coupling elements that will enable us to perform large-scale simulations of nonadiabatic dynamics at the CCSD level of theory. Chemical systems of interest are now within reach of CCSD dynamics using, e.g., the multiple spawning framework;18 for example, a single-point calculation on thymine with a cc-pVDZ basis, including the gradients of the nπ∗ and ππ∗ states as well as the coupling between them, can be performed in a matter of minutes on a modern CPU node (see Schnack-Petersen et al.26 for representative timings).

We emphasize that, for systems where the intersecting states span the same symmetry, the wavepacket may end up in regions that encompass a defective intersection. We then expect that corrections must be applied to the standard CC methods in order to extract meaningful results, although this will depend on the size of the defective intersection seam, which, in turn, depends on the truncation level. Work on extending the present implementation to the similarity constrained coupled cluster method (SCCSD), where such defects are completely eliminated,15,16 is in progress. Note that the Lagrangian approach makes such an extension straightforward; we simply need to add the orthogonality condition to the Lagrangian and solve the resulting response equations.

The case that can be treated with standard coupled cluster theory is that of intersections where the states span different symmetries (e.g., the nπ∗ and ππ∗ states in thymine). We may expect that such systems can be accurately described in dynamics simulations where coupled cluster theory provides the underlying electronic structure. This is the subject of a forthcoming article.

ACKNOWLEDGMENTS

We thank David M. G. Williams for enlightening discussions. This work has received funding from the European Research Council (ERC) under the European Union's Horizon 2020 Research and Innovation Programme (grant agreement No. 101020016). E.F.K. and H.K. both acknowledge funding from the Research Council of Norway through FRINATEK project 275506. We acknowledge computing resources through UNINETT Sigma2, the National Infrastructure for High Performance Computing and Data Storage in Norway, through project number NN2962k.
1. T. Wolf, R. H. Myhre, J. Cryan, S. Coriani, R. Squibb, A. Battistoni, N. Berrah, C. Bostedt, P. Bucksbaum, G. Coslovich, et al., "Probing ultrafast ππ*/nπ* internal conversion in organic chromophores via K-edge resonant absorption," Nat. Commun. 8, 1-7 (2017).
2. W. Domcke, D. R. Yarkony, and H. Köppel, Conical Intersections: Theory, Computation and Experiment, Vol. 17 (World Scientific, 2011).
3. B. F. Curchod and T. J. Martinez, "Ab initio nonadiabatic quantum molecular dynamics," Chem. Rev. 118, 3305-3336 (2018).
4. M. Kanno, Y. Ito, N. Shimakura, S. Koseki, H. Kono, and Y. Fujimura, "Ab initio quantum dynamical analysis of ultrafast nonradiative transitions via conical intersections in pyrazine," Phys. Chem. Chem. Phys. 17, 2012-2024 (2015).
5. T. Horio, R. Spesyvtsev, K. Nagashima, R. A. Ingle, Y.-i. Suzuki, and T. Suzuki, "Full observation of ultrafast cascaded radiationless transitions from S2(ππ∗) state of pyrazine using vacuum ultraviolet photoelectron imaging," J. Chem. Phys. 145, 044306 (2016).
6. B. Mignolet, M. Kanno, N. Shimakura, S. Koseki, F. Remacle, H. Kono, and Y. Fujimura, "Ultrafast nonradiative transition pathways in photo-excited pyrazine: Ab initio analysis of time-resolved vacuum ultraviolet photoelectron spectrum," Chem. Phys. 515, 704-709 (2018).
7. K. Sun, W. Xie, L. Chen, W. Domcke, and M. F. Gelin, "Multi-faceted spectroscopic mapping of ultrafast nonadiabatic dynamics near conical intersections: A computational study," J. Chem. Phys. 153, 174111 (2020).
8. V. Scutelnic, S. Tsuru, M. Pápai, Z. Yang, M. Epshtein, T. Xue, E. Haugen, Y. Kobayashi, A. I. Krylov, K. B. Møller, et al., "X-ray transient absorption reveals the 1Au (nπ*) state of pyrazine in electronic relaxation," Nat. Commun. 12, 1-8 (2021).
9. B. O. Roos, P. R. Taylor, and P. E. Siegbahn, "A complete active space SCF method (CASSCF) using a density matrix formulated super-CI approach," Chem. Phys. 48, 157-173 (1980).
10. W. Kohn and L. J. Sham, "Self-consistent equations including exchange and correlation effects," Phys. Rev. 140, A1133 (1965).
11. J. Schirmer, "Beyond the random-phase approximation: A new approximation scheme for the polarization propagator," Phys. Rev. A 26, 2395 (1982).
12. C. Hättig, "Structure optimizations for excited states with correlated second-order methods: CC2 and ADC(2)," Adv. Quantum Chem. 50, 37-60 (2005).
13. A. Köhn and A. Tajti, "Can coupled-cluster theory treat conical intersections?" J. Chem. Phys. 127, 044105 (2007).
14. E. F. Kjønstad, R. H. Myhre, T. J. Martinez, and H. Koch, "Crossing conditions in coupled cluster theory," J. Chem. Phys. 147, 164105 (2017).
15. E. F. Kjønstad and H. Koch, "Resolving the notorious case of conical intersections for coupled cluster dynamics," J. Phys. Chem. Lett. 8, 4801-4807 (2017).
16. E. F. Kjønstad and H. Koch, "An orbital invariant similarity constrained coupled cluster model," J. Chem. Theory Comput. 15, 5386-5397 (2019).
17. J. C. Tully, "Molecular dynamics with electronic transitions," J. Chem. Phys. 93, 1061-1071 (1990).
18. M. Ben-Nun, J. Quenneville, and T. J. Martinez, "Ab initio multiple spawning: Photochemistry from first principles quantum molecular dynamics," J. Phys. Chem. A 104, 5161-5175 (2000).
19. O. Christiansen, "First-order nonadiabatic coupling matrix elements using coupled cluster methods. I. Theory," J. Chem. Phys. 110, 711-723 (1999).
20. A. Tajti and P. G. Szalay, "Analytic evaluation of the nonadiabatic coupling vector between excited states using equation-of-motion coupled-cluster theory," J. Chem. Phys. 131, 124104 (2009).
21. S. Faraji, S. Matsika, and A. I. Krylov, "Calculations of non-adiabatic couplings within equation-of-motion coupled-cluster framework: Theory, implementation, and validation against multi-reference methods," J. Chem. Phys. 148, 044103 (2018).
22. J. Gauss, A. Tajti, M. Kállay, J. F. Stanton, and P. G. Szalay, "Analytic calculation of the diagonal Born-Oppenheimer correction within configuration-interaction and coupled-cluster theory," J. Chem. Phys. 125, 144111 (2006).
23. K. Shamasundar, "Diagonal Born-Oppenheimer correction for coupled-cluster wave-functions," Mol. Phys. 116, 1483-1495 (2018).
24. E. F. Kjønstad and H. Koch, "Biorthonormal formalism for nonadiabatic coupled cluster dynamics," J. Chem. Theory Comput. 17, 127-138 (2021).
25. E. G. Hohenstein, "Analytic formulation of derivative coupling vectors for complete active space configuration interaction wavefunctions with floating occupation molecular orbitals," J. Chem. Phys. 145, 174110 (2016).
26. A. K. Schnack-Petersen, H. Koch, S. Coriani, and E. F. Kjønstad, "Efficient implementation of molecular CCSD gradients with Cholesky-decomposed electron repulsion integrals," J. Chem. Phys. 156, 244111 (2022).
27. H. Koch and P. Jørgensen, "Coupled cluster response functions," J. Chem. Phys. 93, 3333-3344 (1990).
28. J. Olsen, K. L. Bak, K. Ruud, T. Helgaker, and P. Jørgensen, "Orbital connections for perturbation-dependent basis sets," Theor. Chem. Acc. 90, 421-439 (1995).
29. S. D. Folkestad, E. F. Kjønstad, R. H. Myhre, J. H. Andersen, A. Balbi, S. Coriani, T. Giovannini, L. Goletto, T. S. Haugland, A. Hutcheson, I.-M. Høyvik, T. Moitra, A. C. Paul, M. Scavino, A. S. Skeidsvoll, Å. H. Tveten, and H. Koch, "eT 1.0: An open source electronic structure program with emphasis on coupled cluster and multilevel methods," J. Chem. Phys. 152, 184103 (2020).
30. E. Valeev, "Libint: A library for the evaluation of molecular integrals of many-body operators over Gaussian functions, version 2.7.0-beta.6," (2020).
31. M. J. Bearpark, M. A. Robb, and H. B. Schlegel, "A direct method for the location of the lowest energy point on a potential surface crossing," Chem. Phys. Lett. 223, 269-274 (1994).
32. F. Aquilante, J. Autschbach, A. Baiardi, S. Battaglia, V. A. Borin, L. F. Chibotaru, I. Conti, L. De Vico, M. Delcey, I. Fdez. Galván, et al., "Modern quantum chemistry with [Open]Molcas," J. Chem. Phys. 152, 214117 (2020).
33. T. H. Dunning, "Gaussian basis sets for use in correlated molecular calculations. I. The atoms boron through neon and hydrogen," J. Chem. Phys. 90, 1007-1023 (1989).
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' INTRODUCTION The nuclear dynamics that follows photoexcitation typically involves non-adiabatic population transfer be- tween several electronic states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' For example, in the nucle- obase thymine, photoexcitation to the bright ππ∗ state is followed by rapid (60 fs) non-adiabatic population trans- fer to the dark nπ∗ state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='1 As is well known, the approx- imate description of the electronic structure can have a dramatic qualitative impact on the simulated nuclear dy- namics, often complicating the task of correctly identify- ing the actual physics behind the processes observed in pump-probe experiments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='2,3 A recent example is the on- going debate about the dynamics that follows excitation to the bright B3u state in pyrazine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='4–8 The ambiguities involved in interpreting time-resolved spectra illustrate the need for highly accurate description of the electronic structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' A number of electronic structure methods has a long history of being applied to treat non-adiabatic effects, in- cluding complete active space9 (CAS) methods, density functional theory10 (DFT), and algebraic diagrammatic construction11 (ADC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' These methods are often comple- mentary, where some are able to describe static correla- tion in the ground state and ground state intersections (CAS) while others better capture dynamical correlation but are unable to treat static correlation in the ground state as well as actual crossings with the ground state (DFT, ADC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' In the latter category, there is still a need for a method that has systematically improvable accu- racy that extends beyond a perturbative description of double excitations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Coupled cluster theory is now well-established as the method of choice whenever this level of accuracy is re- quired and the ground state is accurately described by a single determinant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' However, initial progress towards a)Electronic mail: eirik.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='kjonstad@ntnu.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='no its use in nonadiabatic dynamics simulations was slowed down with the realization12,13 that the method produces non-physical results at electronic degeneracies when the states that cross span the same symmetry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Later work by the present authors and collaborators showed that these artifacts were caused by the loss of electronic state or- thogonality (matrix defects)14 and that they could be fully removed by enforcing orthogonality relations be- tween the electronic states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='15,16 Our current understand- ing is that coupled cluster methods are able to de- scribe conical intersections when the states span different symmetries but corrections13,15,16 are required when the states span the same symmetry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' However, these conclu- sions are based on studies of the potential energy surfaces and not from considerations of the predicted physics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' It still remains an open question to what extent the artifacts at same-symmetry intersections negatively affect the pre- dicted dynamics in trajectory-based simulation methods like surface hopping17 and ab initio multiple spawning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='18 Already in 1999 Christiansen19 derived expressions for the derivative coupling elements in coupled cluster the- ory, but the first implementation was given later by Tajti and Szalay20 at the singles and doubles level (CCSD).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' These authors did not, however, implement the expres- sions in Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' 19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Instead, the coupling was evaluated from the gradient of the two states as well as the gra- dient of a fictitious summed state;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' this summed-state approach was also used in a more recent implementa- tion of the CCSD coupling elements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='21 In addition, they proposed modifications to account for the fact that the coupled cluster states are not normalized, building on earlier work by Gauss and coworkers22 who had found that normalization is important when evaluating the di- agonal Born-Oppenheimer correction to the energy.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The need for normalization in dynamics, which is not trivial to achieve, was later questioned by Shamasundar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='23 In a recent publication, we confirmed this by showing that a biorthonormal formalism exists in which there is no dependence on the norm of the electronic states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='24 In the present work, we provide a derivation (which is arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='13264v1 [physics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='chem-ph] 30 Jan 2023 2 equivalent to Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' 19) and implementation, at the CCSD level of theory, of the derivative coupling between ground and excited states as well as between excited states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The derivation follows the Lagrangian approach for the derivative coupling proposed by Hohenstein in the con- text of CAS configuration interaction (CASCI),25 while the present implementation builds on an efficient imple- mentation of analytical gradients, exploiting Cholesky decomposed electronic repulsion integrals, recently pub- lished by the authors and collaborators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='26 II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' THEORY A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Lagrangian The derivative coupling between states i and j is19,24 Fij = ⟨ψL i |∇ψR j ⟩, i, j = 0, 1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' , (1) where L and R signify that these are the left and right electronic states, and the gradient ∇ is taken with respect to the coordinates of the atomic nuclei.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Analytical expressions for Fij may be derived by using the Lagrangian technique.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Here, we use the Lagrangian proposed by Hohenstein.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='25 For the coupled cluster case, this Lagrangian can be expressed as24 Lij = Oij + conditions (2) where Oij = ⟨ψL i (x0)|ψR j (x)⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (3) Here we have made the dependence on the nuclear ge- ometry explicit: x0 is the geometry where the derivative is to be evaluated, while x is allowed to vary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Upon dif- ferentiating Lij, the derivative operation ∇ only acts on the ket vector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' As a result, the derivative of Lij at x0 is identical to Fij at x0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='24,25 The conditions in Lij are those that are required to specify the right state ψR j for all values of x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' These are: the Hartree-Fock equations, for specifying the orbitals;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' the amplitude equations, for specifying the ground state cluster amplitudes;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' and the excited state eigenvalue equa- tions, for specifying the excited state amplitudes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Writ- ten out in detail, the Lagrangian reads Lij = Oij + � µ ¯ζµ⟨µ| ¯H |HF⟩ + � µ ¯γµ � ⟨µ|[ ¯H, Rj]|HF⟩ − ωjRj µ � + ¯ξ(1 − ⟨Lj |Rj⟩) + � ai ¯κaiFai, (4) where we have suppressed the dependence on x0 for no- tational convenience.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' This expression for Lij introduces various quantities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The coupled cluster conditions are expressed in terms of the similarity-transformed Hamiltonian ¯H = exp(−T) exp(κ)H exp(−κ) exp(T), (5) where we have introduced the orbital rotation operator κ = � ai κaiE− ai, E− ai = Eai − Eia, (6) as well as the cluster operator T = � µ tµτµ (7) The scalars tµ are known as cluster amplitudes, and the τµ denote excitation operators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The Eai are singlet one- electron excitation operators and Eia are corresponding deexcitation operators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Here, κ(x0) = 0 by assumption.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The electronic states are conveniently expressed as |ψR k ⟩ = Rk exp(T)|HF⟩ (8) ⟨ψL k | = ⟨HF|Lk exp(−T) (9) where Rk = Rk 0 + Rk = Rk 0 + � µ Rk µτµ (10) Lk = Lk 0 + Lk = Lk 0 + � µ Lk µτ † µ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (11) We will also find it useful to write |Rk⟩ = Rk 0|HF⟩ + |Rk⟩ (12) ⟨Lk| = ⟨HF|Lk 0 + ⟨Lk|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (13) Furthermore, we have let ωk = ⟨Lk |[ ¯H, Rk]|HF⟩ (14) and defined the Fock matrix as Fpq = hpq + � k (2gpkkq − gpqkk).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (15) Here, hpq and gpqrs are the one- and two-electron inte- grals of the Hamiltonian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Following the conventional no- tation, we let p, q, r, and s denote generic orbitals;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' i, j, k, and l denote occupied orbitals;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' a, b, c, and d denote vir- tual orbitals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Lagrangian multipliers are denoted with a bar (¯ζµ, ¯κai, ¯ξ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The left-state quantities in Lij, that is, ψL i and Lj, are constants that define Lij.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' They are evaluated at x0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Thus, the Lagrangian’s dependencies are understood as Lij = Lij(x, t, Rj, κ, ¯ζ, ¯ξ, ¯κ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' x0), (16) where the semicolon denotes that Lij depends only para- metrically on x0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' 3 B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Lagrangian stationarity conditions The derivative coupling becomes the partial derivative of Lij when the Lagrangian is stationary with respect to all variables and multipliers that depend implicitly on x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' We begin by considering stationarity for Rj: ∂Lij ∂Rj σ = Li σ + � µ ¯γµAµσ − ωj¯γσ − � ν Lj νAνσ � µ ¯γµRj µ − ¯ξLj σ = 0, (17) where Aµν = ⟨µ|[ ¯H, τν]|HF⟩.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (18) Using vector notation, this condition reads 0 = LT i + ¯γT (A − ωj) − (ωj ¯γT Rj + ¯ξ)LT j (19) Clearly, with ¯ξ = −ωj ¯γT Rj, the last term in the equation vanishes, and we obtain stationarity provided ¯γT = 1 ωj − ωi LT i .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (20) We thus see that the excited state multipliers (¯ξ, ¯γ) can be expressed in terms of the excited states (Li, Rj) and the associated excitation energies (ωi, ωj).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Stationarity with respect to t yields 0 = tηT + ¯ζT A (21) where tησ = ⟨Li |τσ |Rj⟩ + (F(¯γ)Rj)σ, (22) with the well-known27 F-matrix defined as F(¯γ)µν = ⟨¯γ |[[ ¯H, τµ], τν]|HF⟩, ⟨¯γ| = ⟨σ|¯γσ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (23) Similarly, stationarity with respect to κ yields 0 = κηT + ¯κT AHF, (24) where κηai = ⟨Li |E− ai|Rj⟩ + ⟨¯ζ |[E− ai, ¯H]|HF⟩ + ⟨¯γ |[[E− ai, ¯H], Rj]|HF⟩, (25) and where AHF is the Hartree-Fock Hessian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The ampli- tude and orbital conditions, given by Eqs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (21) and (24), are solved numerically for ¯ζ and ¯κ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Derivative coupling elements Once ¯ζ and ¯κ are known, we can evaluate the coupling by taking the partial derivative of Lij with respect to the nuclear components {q}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' This yields19,24 F q ij = ⟨Li |[ ¯Hq, Rj]|HF⟩ ωj − ωi + ⟨¯ζ | ¯Hq |HF⟩ + ¯κaiF q ai, (26) where ¯Hq = exp(−T)Hq exp(T).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (27) Here Hq denotes the partial derivative of H with respect to the qth nuclear coordinate, xq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' By expanding the com- mutator in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (26), we obtain the equivalent expression F q ij = ⟨Li | ¯Hq |Rj⟩ ωj − ωi + ⟨˜ζ | ¯Hq |HF⟩ + ¯κaiF q ai, (28) where ˜ζ = ¯ζ − J, Jµ = ⟨Li |Rj |µ⟩ ωj − ωi = jµ ωj − ωi .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (29) Clearly, Fij is the sum of an excited state gradient and a ground state gradient, plus an orbital relaxation term.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The expression in Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (28) is convenient when invoking an existing molecular gradient code.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' So far we have assumed that the right state (ψR j ) is an excited state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' This raises the question of how to evaluate the coupling when ψR j is the ground state (j = 0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' When this is the case, the excited state condition in Lij can be removed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' As a result, the t stationarity simplifies to 0 = LT i + ¯ζT A, (30) so that ¯ζT = − 1 ωi LT i = 1 E0 − Ei LT i , (31) where Ek denotes the electronic energy of the kth state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The orbital multiplier equation is also simplified by the removal of ⟨¯γ|, but this equation must still be solved nu- merically.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Once ¯κ is known, we can evaluate Fi0 as F q i0 = ⟨Li | ¯Hq |HF⟩ E0 − Ei + ¯κaiF q ai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (32) D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Significance of orbital connections Hamiltonian derivatives are treated in the same way as for molecular energy gradients.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' That is, we take H to be expressed, for all x, in a non-unique orthonormal MO (OMO) basis which is defined by an orbital connection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='28 Any orbital connection can be used, but the choice may actually affect the expression for the derivative coupling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' In fact, as we will explain below, the formula in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (26) is only correct when we use the natural connection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='19,24,28 For other connections, such as the widely-used symmetric connection, the partial derivative of Oij is non-zero and must be added to the expression for Fij.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='25 To show this, we express the derivative of Oij in terms of the orbital connection.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Given a connection matrix T , we define the OMOs as ψp = � q Tpqϕq, (33) 4 where the unmodified MOs (UMOs) are given as ϕq = � α Cαq(x0)χα(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (34) Here, {Cαq} denotes MO coefficients, and {χα} denotes atomic orbitals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The UMOs are generally only orthonor- mal at x0, that is, Srs = ⟨ϕr |ϕs⟩ ̸= δrs, x ̸= x0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (35) This is, of course, why an orbital connection is required in the first place;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' consistently evaluating the derivative is most easily done in a Fock space defined by an orbital basis that is orthonormal for all values of x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Now, the derivative of Oij can be written28 Oq ij = ∂Oij ∂xq ��� 0 = � rs Dij rsY q rs, (36) where Dij is the transition state density at x0, and Y q rs = � ψr ��� ∂ψs ∂xq ���� 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (37) For the natural connection, we have, by construction,28 Y q rs = 0, (38) and so we can conclude that19,28 Oq ij = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (39) Next, let us consider the symmetric connection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' In this case, T = S−1/2, which implies that ∂Trs ∂xq = −1 2 ∂Srs ∂xq ��� 0 = −1 2(W q rs + W q sr), (40) where W q rs = � ϕr ��� ∂ϕs ∂xq ���� 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (41) Consequently, Y q rs = W q rs − 1 2(W q rs + W q sr) = 1 2(W q rs − W q sr), (42) and so Oq ij = � rs Dij rs �1 2(W q rs − W q sr) � = � rs �1 2(Dij rs − Dij sr) � W q rs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (43) For the symmetric connection, therefore, the derivative of Oij is equal to the anti-symmetrized density matrix contracted with a ket-derivative of an overlap matrix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='25 This overlap derivative is evaluated as W q rs = � αβ CαrCβs � χα ��� ∂χβ ∂xq ��� 0 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (44) For the natural connection, W q is of course not needed for Oq ij (which is zero).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' However, W q is required for the reorthonormalization terms associated with the Hamilto- nian.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' For the natural connection, the ket-derivative W q plays the same role that the braket-derivative Sq does for the symmetric connection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='28 These reorthonormalization terms are the same for derivative couplings and molecular energy gradients, so we refer the reader to the literature for more details.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='26 E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Relation to previous implementations In the literature, the derivative coupling has been im- plemented through a summed-state formula20,21 which is closely related to the one presented in this work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' How- ever, we have not been able to show that the two formu- lations are equivalent, except in the FCI limit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' As we will see, our values for the coupling deviates to some extent from the values presented by Tajti and Szalay for the LiH molecule.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='20 III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' IMPLEMENTATION A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Evaluation of the derivative coupling The derivative coupling has been implemented in a de- velopment version of the eT program.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='29 The implemen- tation builds on the recent implementation by Schnack- Petersen et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='26 for ground and excited state molecular gradients.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Our implementation uses existing routines for molecular gradients and two-electron densities,26 as well as several other quantities already implemented in the eT program,29 such as the F-matrix (F(¯γ)), the Hartree- Fock Hessian (AHF), and the second and third terms of κη.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' We apply central differences to obtain W q numer- ically, exploiting Libint 230 to evaluate the AO overlap integrals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' We have implemented the first term in the tη vector and in the κη vector, that is, the terms that arise when differentiating Oij with respect to t and κ.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' In the case of CCSD, tη can be expressed as tη 1 ai = Li aiRj 0 + jai = Li aiRj 0 + � bj Li bjRj bjai = Dij ai (45) tη 1 aibj = Li aibjRj 0, (46) where Dij is the one-electron transition density.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Finally: κη1 ai = Dij ai − Dij ia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (47) We use an existing implementation to obtain the transi- tion density Dij.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='29 Finally, we have implemented the normalization factor N L j , since this allows us to validate our implementation by comparison to the exact limit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Programmable expres- sions for this quantity can be found elsewhere.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='20 5 FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' LiH/cc-pVQZ derivative coupling calculated for CCSD and FCI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' For CCSD, we present couplings both for the direct evaluation of the coupling (this work) and for the summed-gradient values in Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Branching plane for CCSD/aug-cc-pVDZ conical intersection in H2S (11A2/11B1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' We depict the relative electronic energies (left) and the norm of the coupling vector (right).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Optimization of minimum energy conical intersections As numerical illustrations of the new implementation, we have applied Bearpark et al.’s algorithm for deter- mining minimum energy conical intersections (MECIs), where a gradient is constructed so that it is zero when two conditions are fulfilled: the energy difference van- ishes and the energy gradient along the seam is zero.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='31 In particular, we minimize the gradient G = P∇E2 + 2(E2 − E1) g ||g||, (48) where g = ∇(E2 − E1) (49) and where P is the projection onto the complement of the g-h plane.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' The h vector is h = (E2 − E1)F12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' (50) The gradient G is used in combination with a Broyden- Fletcher-Goldfarb-Shanno (BFGS) solver already imple- mented in eT for geometry optimizations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='26 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='020 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='6 CCSD (this work) CCSD (this work) CCSD (Szalay and Tajti, 2009) CCSD (Szalay and Tajti, 2009) 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='015 FCI 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='010 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='005 from 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='000 Deviation 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='005 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='010 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='015 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='020 2 3 4 5 6 7 8 9 2 3 4 5 6 7 8 9 Li-H bond length (angstrom) Li-H bond length (angstrom)300 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='005 (Hartree) 200 Energy 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='000 100 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='005 0 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='00 h h 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='03 9 96 FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Branching plane for CCSD/aug-cc-pVDZ conical intersections in HOF (11A′′/21A′′).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' We depict the real part of relative electronic energies (left) and the norm of the coupling vector (right).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Branching plane for CCSD/cc-pVDZ Cs minimum energy conical intersection in thymine (nπ∗/ππ∗).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' We depict the relative electronic energies (left) and the norm of the coupling vector (right).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' IV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' NUMERICAL EXAMPLES A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Comparison to earlier implementation: LiH In Figure 1, we show the derivative coupling element for the LiH system as a function of the Li–H bond distance, computed with three methods: CCSD using the direct formula (present work), CCSD using summed- state formula (numbers taken from Tajti and Szalay20), and the exact FCI derivative couplings (obtained with OpenMolcas32).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' All calculations are performed with the Dunning33 basis cc-pVQZ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' All three methods agree closely for all bond distances.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' However, there is a slight deviation between our results and that given in Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' 20, see Figure 1 (right).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' This may be caused by both insufficient numerical convergence (as indicated by the uneven deviation from FCI) as well as differences in the analytical derivative couplings, as noted in Section II E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' In order to ensure a consistent comparison to FCI,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' where states are normalized by default,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' we approximate the coupling from normalized coupled cluster states,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' av- eraging over the left and right coupling elements: ¯F norm ij = F norm ij − F norm ji 2 = ⟨N L i ψL i |∇N R j ψR j ⟩ − ⟨N L j ψL j |∇N R i ψR i ⟩ 2 = N L i N R j F12 − N L j N R i F21 2 ≈ N L i (N L j )−1F12 − N L j (N L i )−1F21 2 (51) Recall that this normalization procedure is only required when we compare to methods with normalized states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' Branching planes in three-atomic systems: SH2, HOF To provide some indication as to the behavior of the coupling in the vicinity of conical intersections, we have 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='0015 1000 800 600 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='0000 Energy 400 200 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content='0015 0.' 
B. Branching planes in three-atomic systems: SH2, HOF

To provide some indication as to the behavior of the coupling in the vicinity of conical intersections, we have calculated branching planes for points of intersection in SH2 (11A2/11B1) and HOF (11A′′/21A′′); see Figures 2 and 3, respectively. As expected, we find a divergence at the point of intersection in SH2 and no visible artifacts. This is consistent with the fact that this is an intersection between states spanning different symmetries [14]. The HOF intersection, on the other hand, is defective because the states have the same symmetry. Note that the coupling still diverges as one approaches the defect.
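A branching-plane map of the kind shown in Figures 2–4 can, in principle, be assembled by displacing the geometry along the two branching-plane vectors and tabulating the energy gap and the norm of the coupling on a grid. The sketch below only illustrates this bookkeeping; electronic_structure is a hypothetical stand-in for an actual CCSD calculation, and toy_model is an artificial two-state model (not from the paper) whose coupling norm grows like 1/(2r) near the crossing.

import numpy as np

def scan_branching_plane(x0, g, h, electronic_structure, extent=0.03, n=25):
    # Tabulate the energy gap and coupling norm on a grid in the (g, h) plane.
    # electronic_structure(x) must return (E_lower, E_upper, coupling_vector).
    alphas = np.linspace(-extent, extent, n)
    gap = np.empty((n, n))
    coupling_norm = np.empty((n, n))
    for a, da in enumerate(alphas):
        for b, db in enumerate(alphas):
            e1, e2, f = electronic_structure(x0 + da * g + db * h)
            gap[a, b] = e2 - e1
            coupling_norm[a, b] = np.linalg.norm(f)
    return gap, coupling_norm

def toy_model(x):
    # Linear two-state toy model: E_pm = -/+ r, coupling norm = 1/(2r).
    a, b = x[0], x[1]
    r = np.hypot(a, b) + 1e-12
    return -r, r, np.array([b, -a, 0.0]) / (2.0 * r**2)

gap, cnorm = scan_branching_plane(np.zeros(3), np.array([1.0, 0.0, 0.0]),
                                  np.array([0.0, 1.0, 0.0]), toy_model)
print(gap.min(), cnorm.max())  # the coupling norm blows up near the crossing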
C. Minimum energy conical intersection: thymine

Finally, we have applied the optimization algorithm described in Section III B to locate the nπ∗/ππ∗ minimum energy conical intersection in thymine, restricted to nuclear geometries with Cs symmetry; see Figure 4. In this calculation, we have used the cc-pVDZ basis. As for SH2, this is a different-symmetry intersection and there is no sign of non-physical artifacts.
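The optimization algorithm of Section III B is not reproduced here. As a rough, generic illustration of how a minimum energy conical intersection can be located, the sketch below minimizes a simple penalty function (mean energy plus a squared-gap penalty) with a finite-difference gradient. This is a common textbook-style strategy, not necessarily the algorithm used in this work, and energies is a hypothetical interface to an electronic-structure code.

import numpy as np

def locate_meci_penalty(x, energies, sigma=10.0, step=0.02, eps=1e-4, iters=200):
    # Crude penalty-function search: minimize 0.5*(E_i + E_j) + sigma*(E_j - E_i)**2.
    def penalty(y):
        ei, ej = energies(y)
        return 0.5 * (ei + ej) + sigma * (ej - ei) ** 2

    for _ in range(iters):
        f0 = penalty(x)
        grad = np.zeros_like(x)
        for k in range(x.size):          # forward-difference gradient
            xp = x.copy()
            xp[k] += eps
            grad[k] = (penalty(xp) - f0) / eps
        x = x - step * grad
    return x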
V. SUMMARY AND OUTLOOK

In this work we have presented an efficient implementation of derivative coupling elements that will enable us to perform large-scale simulations of nonadiabatic dynamics at the CCSD level of theory. Chemical systems of interest are now within the reach of CCSD dynamics using, e.g., the multiple spawning framework [18]; for example, a single-point calculation on thymine with a cc-pVDZ basis, including gradients of the nπ∗ and ππ∗ states, as well as the coupling between them, can be performed in a matter of minutes on a modern CPU node (see Schnack-Petersen et al. [26] for representative timings).

We emphasize that for systems where the intersecting states span the same symmetry, the wavepacket may end up in regions that encompass a defective intersection. We then expect that corrections must be applied to the standard CC methods in order to extract meaningful results, though this will depend on the size of the defective intersection seam, which, in turn, depends on the truncation level. Work on extending the present implementation to the similarity constrained coupled cluster method (SCCSD), where such defects are completely eliminated [15, 16], is in progress. Note that the Lagrangian approach makes such an extension straightforward; we simply need to add the orthogonality condition to the Lagrangian and solve the resulting response equations. The case that can be treated with standard coupled cluster theory is that of intersections where the states span different symmetries (e.g., the nπ∗ and ππ∗ states in thymine). We may expect that such systems can be accurately described in dynamics simulations where coupled cluster theory provides the underlying electronic structure. This is the subject of a forthcoming article.
ACKNOWLEDGMENTS

We thank David M. G. Williams for enlightening discussions. This work has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 Research and Innovation Programme (grant agreement No. 101020016). E.F.K. and H.K. both acknowledge funding from the Research Council of Norway through FRINATEK project 275506. We acknowledge computing resources through UNINETT Sigma2 – the National Infrastructure for High Performance Computing and Data Storage in Norway, through project number NN2962k.
1. T. Wolf, R. H. Myhre, J. Cryan, S. Coriani, R. Squibb, A. Battistoni, N. Berrah, C. Bostedt, P. Bucksbaum, G. Coslovich, et al., “Probing ultrafast ππ*/nπ* internal conversion in organic chromophores via K-edge resonant absorption,” Nat. Commun. 8, 1–7 (2017).
2. W. Domcke, D. R. Yarkony, and H. Köppel, Conical Intersections: Theory, Computation and Experiment, Vol. 17 (World Scientific, 2011).
3. B. F. Curchod and T. J. Martinez, “Ab initio nonadiabatic quantum molecular dynamics,” Chem. Rev. 118, 3305–3336 (2018).
4. M. Kanno, Y. Ito, N. Shimakura, S. Koseki, H. Kono, and Y. Fujimura, “Ab initio quantum dynamical analysis of ultrafast nonradiative transitions via conical intersections in pyrazine,” Phys. Chem. Chem. Phys. 17, 2012–2024 (2015).
5. T. Horio, R. Spesyvtsev, K. Nagashima, R. A. Ingle, Y.-i. Suzuki, and T. Suzuki, “Full observation of ultrafast cascaded radiationless transitions from S2(ππ∗) state of pyrazine using vacuum ultraviolet photoelectron imaging,” J. Chem. Phys. 145, 044306 (2016).
6. B. Mignolet, M. Kanno, N. Shimakura, S. Koseki, F. Remacle, H. Kono, and Y. Fujimura, “Ultrafast nonradiative transition pathways in photo-excited pyrazine: Ab initio analysis of time-resolved vacuum ultraviolet photoelectron spectrum,” Chem. Phys. 515, 704–709 (2018).
7. K. Sun, W. Xie, L. Chen, W. Domcke, and M. F. Gelin, “Multifaceted spectroscopic mapping of ultrafast nonadiabatic dynamics near conical intersections: A computational study,” J. Chem. Phys. 153, 174111 (2020).
8. V. Scutelnic, S. Tsuru, M. Pápai, Z. Yang, M. Epshtein, T. Xue, E. Haugen, Y. Kobayashi, A. I. Krylov, K. B. Møller, et al., “X-ray transient absorption reveals the 1Au (nπ*) state of pyrazine in electronic relaxation,” Nat. Commun. 12, 1–8 (2021).
9. B. O. Roos, P. R. Taylor, and P. E. Sigbahn, “A complete active space SCF method (CASSCF) using a density matrix formulated super-CI approach,” Chem. Phys. 48, 157–173 (1980).
10. W. Kohn and L. J. Sham, “Self-consistent equations including exchange and correlation effects,” Phys. Rev. 140, A1133 (1965).
11. J. Schirmer, “Beyond the random-phase approximation: A new approximation scheme for the polarization propagator,” Phys. Rev. A 26, 2395 (1982).
12. C. Hättig, “Structure optimizations for excited states with correlated second-order methods: CC2 and ADC(2),” Advances in Quantum Chemistry 50, 37–60 (2005).
13. A. Köhn and A. Tajti, “Can coupled-cluster theory treat conical intersections?” J. Chem. Phys. 127, 044105 (2007).
14. E. F. Kjønstad, R. H. Myhre, T. J. Martinez, and H. Koch, “Crossing conditions in coupled cluster theory,” J. Chem. Phys. 147, 164105 (2017).
15. E. F. Kjønstad and H. Koch, “Resolving the notorious case of conical intersections for coupled cluster dynamics,” J. Phys. Chem. Lett. 8, 4801–4807 (2017).
16. E. F. Kjønstad and H. Koch, “An orbital invariant similarity constrained coupled cluster model,” J. Chem. Theory Comput. 15, 5386–5397 (2019).
17. J. C. Tully, “Molecular dynamics with electronic transitions,” J. Chem. Phys. 93, 1061–1071 (1990).
18. M. Ben-Nun, J. Quenneville, and T. J. Martinez, “Ab initio multiple spawning: Photochemistry from first principles quantum molecular dynamics,” J. Phys. Chem. A 104, 5161–5175 (2000).
19. O. Christiansen, “First-order nonadiabatic coupling matrix elements using coupled cluster methods. I. Theory,” J. Chem. Phys. 110, 711–723 (1999).
20. A. Tajti and P. G. Szalay, “Analytic evaluation of the nonadiabatic coupling vector between excited states using equation-of-motion coupled-cluster theory,” J. Chem. Phys. 131, 124104 (2009).
21. S. Faraji, S. Matsika, and A. I. Krylov, “Calculations of non-adiabatic couplings within equation-of-motion coupled-cluster framework: Theory, implementation, and validation against multi-reference methods,” J. Chem. Phys. 148, 044103 (2018).
22. J. Gauss, A. Tajti, M. Kállay, J. F. Stanton, and P. G. Szalay, “Analytic calculation of the diagonal Born–Oppenheimer correction within configuration-interaction and coupled-cluster theory,” J. Chem. Phys. 125, 144111 (2006).
23. K. Shamasundar, “Diagonal Born–Oppenheimer correction for coupled-cluster wave-functions,” Mol. Phys. 116, 1483–1495 (2018).
24. E. F. Kjønstad and H. Koch, “Biorthonormal formalism for nonadiabatic coupled cluster dynamics,” J. Chem. Theory Comput. 17, 127–138 (2021).
25. E. G. Hohenstein, “Analytic formulation of derivative coupling vectors for complete active space configuration interaction wavefunctions with floating occupation molecular orbitals,” J. Chem. Phys. 145, 174110 (2016).
26. A. K. Schnack-Petersen, H. Koch, S. Coriani, and E. F. Kjønstad, “Efficient implementation of molecular CCSD gradients with Cholesky-decomposed electron repulsion integrals,” J. Chem. Phys. 156, 244111 (2022).
27. H. Koch and P. Jørgensen, “Coupled cluster response functions,” J. Chem. Phys. 93, 3333–3344 (1990).
28. J. Olsen, K. L. Bak, K. Ruud, T. Helgaker, and P. Jørgensen, “Orbital connections for perturbation-dependent basis sets,” Theor. Chem. Acc. 90, 421–439 (1995).
29. S. D. Folkestad, E. F. Kjønstad, R. H. Myhre, J. H. Andersen, A. Balbi, S. Coriani, T. Giovannini, L. Goletto, T. S. Haugland, A. Hutcheson, I.-M. Høyvik, T. Moitra, A. C. Paul, M. Scavino, A. S. Skeidsvoll, Å. H. Tveten, and H. Koch, “eT 1.0: An open source electronic structure program with emphasis on coupled cluster and multilevel methods,” J. Chem. Phys. 152, 184103 (2020).
30. E. Valeev, “Libint: A library for the evaluation of molecular integrals of many-body operators over Gaussian functions, version 2.7.0-beta.6,” (2020).
31. M. J. Bearpark, M. A. Robb, and H. B. Schlegel, “A direct method for the location of the lowest energy point on a potential surface crossing,” Chem. Phys. Lett. 223, 269–274 (1994).
32. F. Aquilante, J. Autschbach, A. Baiardi, S. Battaglia, V. A. Borin, L. F. Chibotaru, I. Conti, L. De Vico, M. Delcey, I. Fdez. Galván, et al., “Modern quantum chemistry with [Open]Molcas,” J. Chem. Phys. 152, 214117 (2020).
33. T. H. Dunning, “Gaussian basis sets for use in correlated molecular calculations. I. The atoms boron through neon and hydrogen,” J. Chem. Phys. 90, 1007–1023 (1989).
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} +page_content=' 90, 1007–1023 (1989).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/_tFQT4oBgHgl3EQfLjUE/content/2301.13264v1.pdf'} diff --git a/adE4T4oBgHgl3EQfoA0n/vector_store/index.faiss b/adE4T4oBgHgl3EQfoA0n/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..71d0769ded49bb7df4109c2ed73a65af54c88732 --- /dev/null +++ b/adE4T4oBgHgl3EQfoA0n/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8921ef988158a54a462bf28783ea237f268113be14fbe7e7571039b434eddb91 +size 4063277 diff --git a/e9E2T4oBgHgl3EQfxgi1/content/tmp_files/2301.04112v1.pdf.txt b/e9E2T4oBgHgl3EQfxgi1/content/tmp_files/2301.04112v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..89fc966367d1c12a0b41b39fe310567916a143e3 --- /dev/null +++ b/e9E2T4oBgHgl3EQfxgi1/content/tmp_files/2301.04112v1.pdf.txt @@ -0,0 +1,1097 @@ +arXiv:2301.04112v1 [math-ph] 10 Jan 2023 +Spin glass phase at zero temperature in the +Edwards–Anderson model +Sourav Chatterjee* +Stanford University +January 11, 2023 +Abstract +This article solves two open problems about the Edwards–Anderson model of short- +range spin glasses (in all dimensions). First, it is shown that the ground state is sensitive +to small perturbations of the disorder, in the sense that a small amount of noise gives rise +to a new ground state that is nearly orthogonal to the old one with respect to the site over- +lap inner product. Second, it is shown that one can overturn a macroscopic fraction of the +spins in the ground state with an energy cost that is negligible compared to the size of the +boundary of the overturned region — a feature that is believed to be typical of spin glasses +but clearly absent in ferromagnets. Together, these comprise the first mathematical proof of +glassy behavior in a short-range spin glass model. +Key words and phrases. Edwards–Anderson model, disorder chaos, spin glass. +2020 Mathematics Subject Classification. 82B44, 82D30. +1 +Introduction +The Edwards–Anderson (EA) model was introduced in [28] as a realistic model of a spin glass +in finite dimensions with short-range interactions. In contrast to the Sherrington–Kirkpatrick +(SK) model of spin glasses with mean-field interactions [49], which has been analyzed with +tremendous success [46, 53, 54], the analysis of the EA model remains an elusive goal in both +mathematics and physics. In particular, one question that has remained beyond the reach of +mathematical proof is whether the EA model indeed exhibits the physical characteristics of a +true glassy material at low enough temperature. But the question goes beyond the nitty-gritty of +mathematical rigor; even physicists are not unanimous about the true nature of the EA model. +For more about this longstanding debate, see [14, 31, 38–40, 44, 45] and the references therein. +*Mailing address: Department of Statistics, Stanford University, 390 Jane Stanford Way, Stanford, CA 94305, +USA. Email: souravc@stanford.edu. +1 + +1.1 +The model +Let G be a finite, simple, connected graph with vertex set V and edge set E. Let J = (Je)e∈E be +a collection of i.i.d. random variables with a given law µ. The Edwards–Anderson Hamiltonian +on this graph in the environment (or disorder, or bond strengths, or edge weights) J is the random +function HJ : {−1, 1}V → R defined as +HJ(σ) := − +� +{i,j}∈E +Jijσiσj. 
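To make the definition concrete: H_J(sigma) is a signed sum of coupling-weighted edge agreements, H_J(sigma) = - sum over edges {i,j} of J_ij sigma_i sigma_j. The following minimal Python sketch (illustrative only and not part of the original text; the grid size, random seed, and helper names are choices made here) evaluates this energy for one spin configuration on a small grid with i.i.d. standard Gaussian couplings.

import numpy as np

rng = np.random.default_rng(0)

# A small graph: the 3 x 3 grid with nearest-neighbour edges.
L = 3
vertices = [(x, y) for x in range(L) for y in range(L)]
edges = [((x, y), (x + 1, y)) for x in range(L - 1) for y in range(L)] + \
        [((x, y), (x, y + 1)) for x in range(L) for y in range(L - 1)]

# i.i.d. standard Gaussian disorder J = (J_e), one coupling per edge.
J = {e: rng.standard_normal() for e in edges}

def hamiltonian(sigma):
    # H_J(sigma) = - sum over edges {i,j} of J_ij * sigma_i * sigma_j
    return -sum(J[(i, j)] * sigma[i] * sigma[j] for (i, j) in edges)

sigma_all_plus = {v: 1 for v in vertices}
print(hamiltonian(sigma_all_plus))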
+A ground state for this model is a state σ (depending on J) that minimizes the above Hamilto- +nian. If µ has no atoms, then it is not hard to show that with probability one, there are exactly +two ground states σ and −σ. +What we have described above is the ground state under the free boundary condition. Some- +times we impose a boundary condition, in the following way. Let B be a nonempty subset of +V and γ be a fixed element of {−1, 1}B. Then the ground state under boundary condition γ on +the boundary B is the minimizer of HJ(σ) under the constraint that σi = γi for each i ∈ B. +Again, it is not hard to show that under a boundary condition, there is a unique ground state +with probability one if µ has no atoms, provided that V \ B is a connected subset of V . We will +henceforth assume that V \ B is connected. +To fix ideas, the reader can think of G as the cube {0, 1, . . . , L}d in Zd, with the usual +nearest-neighbor edges. In the absence of a boundary condition, we have the EA Hamiltonian +with free boundary condition on this cube. The usual boundary B in this setting is the set of +vertices that forms the boundary of the cube (i.e., at least one coordinate is 0 or L). Alternatively, +one can identify vertices belonging to opposite faces; the free boundary model in this case is +what’s called the EA model on the cube with periodic boundary conditions. +The EA model at inverse temperature β assigns a probability measure with mass proportional +to e−βH(σ) at each σ. The β = ∞ (zero temperature) model is just the probability measure that +puts all its mass on the ground state (or the uniform distribution on the pair of ground states in +the free boundary case). In this paper, we will only consider the zero temperature model. Also, +throughout, we will take the disorder distribution µ to be the standard Gaussian distribution, +although various parts of the proofs should work for quite general distributions. +Incidentally, one of the difficulties in analyzing the ground state of the EA model is that find- +ing the ground state is the same as finding the maximum cut in the weighted complete graph on +V with edges weights (Je)e∈E. The maximum cut problem is NP-hard (for general graphs [34], +although not for planar graphs [50]), which makes finding the ground state also NP-hard. +1.2 +Results +Our first main result is that the ground state of the EA model with standard Gaussian disorder is +sensitive to small changes in the disorder J, a phenomenon that is sometimes called “disorder +chaos”. We consider two kinds of perturbations, both determined by a parameter p ∈ (0, 1). +In the first kind of perturbation, we replace each Je by (1 − p)Je + +� +2p − p2J′ +e, where J′ = +(J′ +e)e∈E is another set of i.i.d. standard Gaussian random variables, independent of J. The +coefficients in front of Je and J′ +e are chosen to ensure that the linear combination is again a +2 + +standard Gaussian random variable. In the second kind of perturbation, each Je is replaced by +J′ +e with probability p, independently of each other. +Let V ◦ := V \ B denote the set of “interior vertices” of V . Note that V ◦ = V when B = ∅ +(the case of free boundary). We have already assumed earlier that V ◦ is connected. To avoid +trivialities, we will assume that V ◦ is nonempty and |E| ≥ 2. Let σ be the ground state in the +original environment and σ′ be the ground state in the perturbed environment. The “site overlap” +between the two configurations is defined as +R(p) := +1 +|V ◦| +� +i∈V ◦ +σiσ′ +i. 
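On very small graphs the ground state can be found by exhaustive search, so both kinds of perturbation and the site overlap defined above can be simulated directly. The sketch below is a hypothetical illustration (tiny grid, free boundary, invented helper names, brute force over all 2^|V| configurations); because the free-boundary ground state is only determined up to a global sign, it reports R(p)^2 rather than R(p).

import itertools
import numpy as np

rng = np.random.default_rng(1)

# A 2 x 3 grid: small enough that brute force over 2^6 configurations is instant.
W, H = 3, 2
V = [(x, y) for x in range(W) for y in range(H)]
E = [((x, y), (x + 1, y)) for x in range(W - 1) for y in range(H)] + \
    [((x, y), (x, y + 1)) for x in range(W) for y in range(H - 1)]

def energy(sigma, J):
    return -sum(J[e] * sigma[e[0]] * sigma[e[1]] for e in E)

def ground_state(J):
    # Exhaustive minimisation of H_J over {-1,+1}^V (free boundary).
    return min((dict(zip(V, s)) for s in itertools.product([-1, 1], repeat=len(V))),
               key=lambda sigma: energy(sigma, J))

J = {e: rng.standard_normal() for e in E}
sigma = ground_state(J)

p = 0.3
J_prime = {e: rng.standard_normal() for e in E}               # independent copy J'

# First kind: (1 - p) J_e + sqrt(2p - p^2) J'_e, again a standard Gaussian.
J_gauss = {e: (1 - p) * J[e] + np.sqrt(2 * p - p * p) * J_prime[e] for e in E}
# Second kind: J_e is replaced by J'_e with probability p, independently over edges.
J_resample = {e: (J_prime[e] if rng.random() < p else J[e]) for e in E}

for J_new in (J_gauss, J_resample):
    sigma_new = ground_state(J_new)
    R = sum(sigma[v] * sigma_new[v] for v in V) / len(V)      # site overlap
    print(R ** 2)   # only R^2 is reported: free boundary leaves a global sign ambiguity

Exhaustive search is viable only for a handful of spins; as noted above, finding the ground state in general amounts to an NP-hard maximum-cut problem.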
+If B = ∅ (i.e., for the free boundary condition), R(p) is not well-defined since there are two +ground states in both environments. But R(p)2 is still well-defined, and that is sufficient for our +purposes. Note that R(p) is close to zero if and only if σ and σ′ are nearly orthogonal to each +other — or in other words, σ and σ′ disagree on approximately half the vertices. The following +theorem shows that under certain conditions, R(p) ≈ 0 with high probability for a tiny value +of p, which is what’s commonly known as disorder chaos for the site overlap. We first state the +result for a general graph G, and then specialize to the case of a cube in Zd in the corollary that +follows. +Theorem 1.1. Let all notations be as above. Let d denote the graph distance on G. Suppose +that there are positive constants α, β, γ and δ such that for any i ∈ V ◦ and r ≥ 1, the number of +j such that d(i, j) ≤ r is at most αrβ, and the number of j such that min{d(j, k) : k ∈ B} ≤ r +is at most γ|B|rδ. Then for both kinds of perturbations, we have that for any p ∈ (0, 1), +E(R(p)2) ≤ +1 +|V ◦| + C(|V ◦|p−β + |B|2p−2δ) +|V ◦|2 +, +where C is a constant depending only on α, β, γ and δ. +Let us now check what this yields for V = {0, 1, . . . , L}d with the usual boundary, for some +dimension d ≥ 1 (not to be confused with the graph distance d). In this case, |V ◦| is of order +Ld, |B| is of order Ld−1, β = d, and δ = 1. Thus, we get the following corollary. +Corollary 1.2. If V = {0, 1, . . . , L}d with the usual boundary and with any given disorder- +independent boundary condition, then for both kinds of perturbations, we have that for all p ∈ +(0, 1), +E(R(p)2) ≤ +� +C(d)L−1p−1 +if d = 1, +C(d)L−2p−2 +if d ≥ 2, +where C(d) depends only on d. For free or periodic boundary, the bound is +E(R(p)2) ≤ C(d) +Ldpd +for all d ≥ 1. +This shows that R(p) ≈ 0 with high probability whenever p ≫ L−1. In other words, if +p ≫ L−1, σ and σ′ disagree at approximately half the sites. This is a mathematical proof of +3 + +the conjecture (made 35 years ago in [15], with heuristic justification) that the ground state of +the EA model is chaotic under small perturbations of the disorder. It is not clear if the threshold +L−1 can be improved. Simulations suggest that improvements may be possible [15]. +The proof of Theorem 1.1 also yields the following result, which justifies the claim made +in [15] that the glassy nature of the EA model at zero temperature is characterized by a chaotic +phase in which the “relative orientations of spins with large separations are sensitive to small +changes in the bond strengths”. +Theorem 1.3. In the setting of Theorem 1.1, take any p ∈ (0, 1), and let σ and σ′ be the ground +states of the unperturbed system and the system with perturbation parameter p (for either kind +of perturbation), respectively. Then for any i, j ∈ V ◦, +|E(σiσjσ′ +iσ′ +j)| ≤ (1 − p)min{d(i,j),d(i,B)+d(j,B)}, +where d(i, B) := min{d(i, k) : k ∈ B} (defined to be infinity if B = ∅). +This theorem shows that if i and j are two vertices such that d(i, j), d(i, B) and d(j, B) are +all much greater than p−1, then the relative orientations of the spins at i and j in the original and +the perturbed environments are approximately independent of each other (since marginally, both +σiσj and σ′ +iσ′ +j are uniformly distributed on {−1, 1}). +Notice the contrast between the EA model and any ferromagnetic model — even one with +random bonds — in Theorems 1.1 and 1.3. 
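The contrast can be made concrete on a toy example. When every coupling is strictly positive (a random-bond ferromagnet), the two all-equal configurations are ground states for any choice of magnitudes, so perturbing the magnitudes cannot move the ground state; with signed Edwards–Anderson couplings, the perturbed ground state is free to change. The following self-contained sketch (illustrative assumptions only: tiny grid, fixed seed, brute force) carries out this comparison.

import itertools
import numpy as np

rng = np.random.default_rng(2)

W, H = 3, 2
V = [(x, y) for x in range(W) for y in range(H)]
E = [((x, y), (x + 1, y)) for x in range(W - 1) for y in range(H)] + \
    [((x, y), (x, y + 1)) for x in range(W) for y in range(H - 1)]

def ground_state(J):
    energy = lambda s: -sum(J[e] * s[e[0]] * s[e[1]] for e in E)
    return min((dict(zip(V, c)) for c in itertools.product([-1, 1], repeat=len(V))),
               key=energy)

g       = {e: rng.standard_normal() for e in E}   # base disorder
g_prime = {e: rng.standard_normal() for e in E}   # independent copy
p = 0.3
mix = lambda a, b: (1 - p) * a + np.sqrt(2 * p - p * p) * b

# Random-bond ferromagnet: couplings |g_e| > 0.  The all-equal configurations are
# always ground states, so perturbing the (positive) magnitudes changes nothing.
ferro      = {e: abs(g[e]) for e in E}
ferro_pert = {e: abs(mix(g[e], g_prime[e])) for e in E}
# Edwards-Anderson: signed couplings.  The perturbed ground state can be very different.
ea      = g
ea_pert = {e: mix(g[e], g_prime[e]) for e in E}

for J0, J1 in ((ferro, ferro_pert), (ea, ea_pert)):
    s0, s1 = ground_state(J0), ground_state(J1)
    overlap = sum(s0[v] * s1[v] for v in V) / len(V)
    print(overlap ** 2)   # identically 1 in the ferromagnetic case; not so for EA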
In a ferromagnetic model, a small perturbation +of the environment does not change the ground state at all, whereas in the EA model, a small +perturbation causes such a large change that the original and perturbed ground states are almost +orthogonal to each other. +Our next result gives another such contrast between ferromagnets and the EA model, also +already known to physicists. In ferromagnets, if a region of spins in the ground state is over- +turned, the energy cost is proportional to the size of the boundary of the overturned region. In +the EA model, it is expected that there are macroscopic regions which can be overturned with +energy cost that is negligible compared to the size of the boundary of the overturned region. (In +fact, this belief is central to the “droplet theory” of the EA model [31], and forms the basis of +the heuristic justification of chaos in [15].) +To fix a convention, we will only look at subsets of V ◦ whose sizes are between |V ◦|/4 +and 3|V ◦|/4. Given a region A ⊂ V ◦, let ∆(A) denote the energy cost of overturning all +spins in A in the ground state. We are interested in showing that there is some set A with +|V ◦|/4 ≤ |A| ≤ 3|V ◦|/4 such that the ratio ∆(A)/|∂A| is small, where ∂A be the edge- +boundary of A — that is, the set of all edges from A to V \ A. (If ∂A = ∅, then ∆(A) = 0, and +we then define this ratio to be zero.) To do this, let us define +F := min +�∆(A) +|∂A| : A ⊆ V ◦, |V ◦| +4 +≤ |A| ≤ 3|V ◦| +4 +� +. +The following result shows that F is small with high probability whenever |V ◦| and |V ◦|/|B| +larger than some power of log |E|. As before, we first state the general result, and then specialize +to the case of V = {0, 1, . . . , L}d in the corollary that follows. +4 + +Theorem 1.4. Let all notations be as in Theorem 1.1, and let F be defined as above. Then +E(F) ≤ C max{|V ◦|−1/(2β+2), (|B|/|V ◦|)1/(2δ+1)} +� +log |E|. +where C is a constant that depends only on α, β, γ and δ. +Recall that if V = {0, 1, . . . , L}d with the usual boundary (or free or periodic boundary), +then |V ◦| is of order Ld, |B| is of order Ld−1, β = d, and δ = 1. Additionally, note that |E| is +of order Ld. Thus, we get the following corollary. +Corollary 1.5. In the setting of Theorem 1.4, if V = {0, 1, . . . , L}d with the usual boundary +and with any given disorder-independent boundary condition, then +E(F) ≤ +� +C(d)L−1/4√log L +if d = 1, +C(d)L−1/3√log L +if d ≥ 2, +where C(d) depends only on d. For free or periodic boundary, the bound is +E(F) ≤ C(d)L−d/(2d+2)� +log L for all d ≥ 1. +It is not hard to show that for d = 1, the bound obtained above is suboptimal. This is because +with high probability we can find two edges e and f that are order L apart, where Je and Jf are +both of order L−1. Overturning all spins between e and f creates an overturned region whose +size is of order L, but the energy cost is only of order L−1. Thus, the correct order of E(F) in +d = 1 is L−1. Presumably, the bound given by Corollary 1.5 may be suboptimal for all d, but +that is not clear. Nor is it clear what the correct order should be for d ≥ 2. +The physics literature is not unanimous about the size of F. For example, there are compet- +ing claims, made via numerical studies, that in d = 3, the energy cost ∆(A) can be as small as +O(L1/5) [15], or O(1) [38]. Note that for a macroscopic region A, |∂A| is at least of order L2 in +d = 3. 
The main difficulty with simulation studies is that finding the ground state is an NP-hard +problem, with no good algorithm even for the “average case”. Simulations can be carried out +with only rather small values of L (e.g., L = 12 in [38]). +As a counterpart of Theorem 1.4, we now show that large regions with small interface en- +ergies, whose existence is guaranteed by Theorem 1.4, are actually exceptionally rare. The +probability that any given region has a small interface energy is exponentially small in the size +of the boundary. This is the content of the next theorem. +Theorem 1.6. In the setting of Theorem 1.1, there are positive constants C1, C2 and C3 depend- +ing only on the maximum degree of G, such that for any A ⊂ V ◦, +P +�∆(A) +|∂A| < C1 +� +≤ C2e−C3|∂A|. +Our final result concerns the size of the so-called “critical droplet” of an edge, an object that +has attracted some recent attention [9]. This is defined as follows. Take any edge e = {i, j}. +Let σ1 be the energy minimizing configuration under the constraint that σi = σj, and let σ2 be +5 + +the energy minimizing configuration under the constraint that σi = −σj. It is easy to see that +σ1 and σ2 do not depend on the value of Je, and for any value of Je (keeping all other fixed), +the ground state of the system is either σ1 or σ2. The critical droplet is the set of sites where σ1 +and σ2 disagree. Under the free boundary condition on G, this is not completely well-defined, +because if a set A fits the above definition, then so does V \ A. In this case we define the size of +the critical droplet (which is our main object of interest) as the minimum of |A| and |V \ A|. +Let D(e) be the critical droplet of an edge e, in the setting of Theorem 1.1. The following +theorem gives a lower bound on the expected value of the size of D(e). +Theorem 1.7. Let D(e) be as above. Then, in the setting of Theorem 1.1, +1 +|E| +� +e∈E +E|D(e)| ≥ +C|V ◦| +|E| max{|V ◦|−1/β, (|B|/|V ◦|)1/δ}, +where C is a positive constant depending only on α, β, γ and δ. +Specializing to the case V = {0, 1, . . . , L}d with the usual boundary (or with free or periodic +boundary), where |V ◦| and |E| are of order Ld, |B| is of order Ld−1, β = d and δ = 1, we obtain +the following corollary. +Corollary 1.8. If V = {0, 1, . . . , Ld} with the usual boundary and with any given disorder- +independent boundary condition (or with free or periodic boundary), then +1 +|E| +� +e∈E +E|D(e)| ≥ C(d)L +for all d ≥ 1, where C(d) is a positive constant depending only on d. +In particular, under the periodic boundary condition on V = {0, 1, . . . , L}d, E|D(e)| ≥ +C(d)L for any e. This has the following consequence for d ≥ 2. Since |D(e)| ≤ |V |/2 = Ld/2 +(due to the periodic boundary condition), an isoperimetric inequality of Bollob´as and Leader +[13, Theorem 8] implies that +|∂D(e)| ≥ min +1≤r≤d 2|D(e)|1−1/rrL(d/r)−1 +≥ min +1≤r≤d 2|D(e)|1−1/rr(2|D(e)|)((d/r)−1)/d ≥ 2|D(e)|1−1/d. +Thus, we get the following corollary. +Corollary 1.9. Take any d ≥ 2. For V = {0, 1, . . . , L}d with periodic boundary condition, we +have that for any edge e, +E|∂D(e)|d/(d−1) ≥ C(d)L, +where C(d) is a positive constant depending only on d. +Note that ∂D(e) is the set of edge-spins that are overturned when Je is replaced by an +independent copy, where we define the spin associated with an edge to be the product of the +6 + +spins at its endpoints. This indicates (but does not prove) that edge-spins are also chaotic with +respect to small perturbations of the disorder. 
What it does prove is that for an edge e = {i, j}, +the dependence of σiσj on Jf can decrease at most polynomially in the distance between e and +f (if it decays at all). This is in contrast to the one-dimensional situation, where the decay is +exponential, as can be verified by explicitly writing down the ground state as a function of the +disorder and the boundary condition. +1.3 +Related literature and open problems +The phenomenon of chaos in lattice spin glasses was proposed in the physics literature by Fisher +and Huse [30] and Bray and Moore [15]. Disorder chaos for the SK model was proved in +[16, 17]. In [16], it was also shown that the bond overlap in the EA model is not chaotic, in the +sense that its value does not drop to zero under a small perturbation. This still leaves open the +possibility that it drops to a nonzero value strictly less than the value at zero perturbation, which +is the more nuanced definition of chaos for the bond overlap. +Further investigations of disorder chaos in the mean-field setting were carried out in [10, 18, +19, 21–25, 29]. The related notion of temperature chaos in mean-field models was investigated +in [11, 20, 47, 52]. Connections with computational complexity were explored in [32, 35], and +with noise sensitivity in [33]. +In the lattice setting, there are fewer results, reflecting the general dearth of rigorous results +for short-range models. The absence of disorder chaos in the bond overlap of the EA model, +in the narrow sense that was proved in [16, 17], has been recently generalized by Arguin and +Hanson [4]. A very interesting connection between disorder chaos in the bond overlap and the +presence of incongruent states (explained below) was proved by Arguin, Newman, and Stein [8], +who showed that if there is no disorder chaos (in the stronger sense), then incongruent ground +states cannot exist, at least as limits of finite volume ground states with disorder-independent +boundary conditions. +The problem of incongruent ground states is one of the central open problems for short- +range spin glasses. The problem is stated most clearly in the infinite volume setting. Consider +the EA Hamiltonian on the whole of Zd instead of a finite region. The notion of “minimizing +the energy” no longer makes sense, but the difference between the energies of two states that +differ only at a finite number of sites is well-defined and finite. An infinite volume state is called +a ground state if overturning any finite number of spins results in an increase in the energy. +It was shown by Newman and Stein [41] that the number of ground states is almost surely +equal to a constant depending on the dimension and the distribution of the disorder. The main +question is whether this number is greater than two in some dimension and for some symmetric +and continuous distribution of the disorder. Obviously, if σ is an infinite volume ground state, +then so is −σ, and so the number of ground states is at least two. Two ground states that are +not related in this way are called incongruent ground states. The above question is the same +as asking whether there can exist a pair of incongruent ground states. This question remains +unanswered. The greatest progress on this topic was made by Newman and Stein [43], who +showed that in dimension two, if there is a pair of incongruent ground states, then there is a +single doubly infinite “domain wall” dividing them. 
This result was used by Arguin, Damron, +Newman, and Stein [5] to prove that there is a unique infinite volume ground state in the EA +7 + +model on the half-plane Z × {0, 1, . . .} under a certain sequence of boundary conditions. The +boundary condition was later eliminated by Arguin and Damron [3], who showed the number of +ground state pairs for the EA model on the half-plane is either 1 or ∞. A related result by Berger +and Tessler [12] shows that for the ground state of the EA model on Z2, “unsatisfied edges” (i.e., +where σiσj ̸= sign(Jij)) do not percolate. +The absence of incongruent infinite volume ground states, if true, will have the following +consequence in finite volume. Any two states that are nearly energy-minimizing will locally look +like a pair of congruent states (i.e., either equal or negations of each other), although they may +be globally quite different. The “almost orthogonal” states produced by the small perturbations +of the disorder in Theorem 1.1 may (or may not) have this property. In the physics literature, +this is known as “regional congruence” [36]. +An important contribution to the study of the EA model from the mathematical literature +is the concept of metastates, introduced by Aizenman and Wehr [1, 2]. A zero-temperature +metastate is a measurable map taking the disorder in infinite volume to a probability measure on +the set of ground states. Aizenman and Wehr [1, 2] showed that metastates exist, and an explicit +construction and interpretation was given later by Newman and Stein [42]. Metastates capture +some aspects of the chaotic nature of spin glasses, such as the “chaotic size dependence” proved +in [41], which means that the ground state in a finite region is chaotic with respect to changes in +the size of the region. For some recent results and different perspectives on metastates, see [26]. +Another topic that has received considerable attention in the mathematical literature is the +question of fluctuations of the ground state energy (and more generally, the free energy at any +temperature). This began with the aforementioned papers of Aizenman and Wehr [1, 2], who +showed that the fluctuations are of the same order as the volume of the system. The motivation +for studying fluctuations is that one can connect it to the question of phase transitions via the +“Imry–Ma argument” [37]. This was made precise in [1, 2]. For further developments in the +study of fluctuations, and especially the important topic of interface energy fluctuations, we +refer to [6, 7, 27, 51] and references therein. +One of the great unsolved questions in spin glass theory concerns the validity of the “Parisi +picture” [48] versus the “droplet theory” of Fisher and Huse [30]. Conclusively settling this con- +troversy has remained out of the reach of rigorous mathematics until this day. Theorem 1.4 and +Corollary 1.5 in the present paper are related to this problem. As explained nicely in Krzakala +and Martin [38], the Parisi picture implies that one can overturn all spins in a macroscopic subset +of {0, 1, . . . , L}3 with O(1) energy cost, whereas the droplet theory implies that the minimum +cost grows as a small positive power of L. While Corollary 1.5 does not settle this debate, it is +the first result to show that one can indeed find macroscopic regions with interface energies that +are negligible compared to the size of the interface. +A related question is the following. 
Let σ and σ′ be the ground states in the original and +perturbed environments, as in Theorem 1.3. Then Theorem 1.3 shows that E(σiσjσ′ +iσ′ +j) drops +sharply to zero as the perturbation parameter p increases from 0 to a small positive value, if i +and j are far apart. Now suppose i and j are neighbors. Then one can show that E(σiσjσ′ +iσ′ +j) +will not drop to zero — but does it drop sharply to a value less than 1? More precisely, is it true +that under the periodic boundary condition, for neighboring i and j, +lim +p→0 lim +L→∞ |E(σiσjσ′ +iσ′ +j)| < 1? +8 + +This, too, would settle the debate between the Parisi picture versus the droplet theory. The +Parisi picture holds if the above is true; the droplet theory, if not. Moreover, it would settle +the longstanding question about the existence of incongruent ground states. Incongruent ground +states exist if and only if the above inequality holds. +Acknowledgements +I thank Louis-Pierre Arguin for helpful comments. This work was partially supported by NSF +grants DMS-2113242 and DMS-2153654. +2 +Proofs +2.1 +Proof of Theorem 1.1 +Let h0, h1, . . . be the orthonormal basis of normalized Hermite polynomials for L2(µ), where µ +is the standard Gaussian distribution on R and h0 ≡ 1. Then an orthonormal basis of L2(J) is +formed by products like hn(J) := � +e∈E hne(Je), where ne ∈ N := {0, 1, . . .} for each e, and +n := (ne)e∈E ∈ NE. Any square-integrable function f(J) of the disorder J can be expanded in +this basis as +f(J) = +� +n∈NE +�f(n)hn(J), +(2.1) +where +�f(n) := E(f(J)hn(J)). +The infinite series on the right side in (2.1) should be interpreted as the L2-limit of partial sums, +where the order of summation is irrelevant. +Now take any distinct i, j ∈ V ◦. Let σ be a ground state in the EA model on G (with +or without a boundary condition). Consider σiσj as a function φ(J) of the disorder J. This +function is well-defined even if we do not impose a boundary condition. Obviously, it is in +L2(J). For any n ∈ NE, let E(n) be the set of edges e ∈ E such that ne > 0, and V (n) be +the set of vertices that are endpoints of the edges in E(n). Then G(n) := (V (n), E(n)) is a +subgraph of G. The following lemma is the main ingredient for the proof of Theorem 1.1. +Lemma 2.1. Let all notations be as above. Then �φ(n) = 0 unless both i and j are in V (n) and +the connected components of G(n) containing i and j are either the same, or both intersect B. +Proof. First, suppose that i ∈ V (n). Let A be the connected component of G(n) that contains +i. Suppose that A ∩ (B ∪ {j}) = ∅. Let ∂A be the set of edges from A to Ac := V \ A. Since +A is a connected component of G(n), no edge in ∂A can be a member of E(n). Define a new +environment J′ as +J′ +e = +� +−Je +if e ∈ ∂A, +Je +if e /∈ ∂A. +9 + +Note that J and J′ have the same law, since the disorder distribution is symmetric around zero, +and the disorder variables are independent. Since ∂A ∩ E(n) = ∅, hn(J) does not depend on +(Je)e∈∂A. Thus, +�φ(n) = E(φ(J)hn(J)) = E(E(φ(J)|(Je)e/∈∂A)hn(J)). +(2.2) +But note that since J and J′ have the same law, and J′ +e = Je for e /∈ ∂A, +E(φ(J)|(Je)e/∈∂A) = E(φ(J′)|(Je)e/∈∂A). +(2.3) +Now, let σ′ be the configuration defined as +σ′ +k = +� +−σk +if k ∈ A, +σk +if k /∈ A. +Then σ′ satisfies the given boundary condition (if any) since A ∩ B = ∅. Let us now split +HJ′(σ′) as +HJ′(σ′) = − +� +{k,l}∈E, +k,l∈A +Jkl(−σk)(−σl) − +� +{k,l}∈∂A +(−Jkl)(−σkσl) − +� +{k,l}∈E, +k,l∈Ac +Jklσkσl += − +� +{k,l}∈E +Jklσkσl = HJ(σ). 
+Moreover, for any τ ∈ {−1, 1}V satisfying the given boundary condition (if any), HJ′(τ) = +HJ(τ ′), where τ ′ +i = −τi if i ∈ A and τ ′ +i = τi if i /∈ A. Since τ ′ also satisfies the given boundary +condition, this shows that σ′ minimizes HJ′, and so σ′ +iσ′ +j = φ(J′). But since j /∈ A and i ∈ A, +σ′ +iσ′ +j = −σiσj. Thus, φ(J′) = −φ(J), and so, by (2.3), +E(φ(J)|(Je)e/∈∂A) = 0. +Plugging this into (2.2), we get that �φ(n) = 0 if i ∈ V (n) and A does not intersect B ∪ {j}. +Next, suppose that i /∈ V (n). In this case, taking A := {i} and repeating the whole argument +as above shows that �φ(n) = 0. Thus, �φ(n) = 0 unless i ∈ V (n) and A intersects B ∪ {j}. +By the symmetry between i and j, we conclude that �φ(n) = 0 unless j ∈ V (n) and the +component of G(n) containing j intersects B ∪ {i}. Combining these two conclusions yields +the claim of the lemma. +Lemma 2.1 gives the following key corollary, which says that if i and j are far apart and far +away from the boundary, then the Hermite polynomial expansion of σiσj consists of only high +degree terms. +Corollary 2.2. If �φ(n) ̸= 0, then |E(n)| ≥ min{d(i, j), d(i, B) + d(j, B)}, where d is the +graph distance on G and d(i, B) := min{d(i, k) : k ∈ B} (which is infinity if B is empty). +10 + +Proof. Suppose that �φ(n) ̸= 0. Then by Lemma 2.1, i, j ∈ V (n) and the connected components +containing i and j are either the same, or they both touch B. In the first case, there is a path of +edges in E(n) connecting i to j, which implies that |E(n)| ≥ d(i, j). In the second case, there +is a path in G(n) connecting i to B and another path in G(n) connecting j to B, which implies +that |E(n)| ≥ d(i, B) + d(j, B). +In the following, instead of using the parameter p for the perturbation, we will reparametrize +p as 1 − e−t, where t ∈ (0, ∞). This is helpful for the following reason. Let +J(t) := e−tJ + +� +1 − e−2tJ′ = (1 − p)J + +� +2p − p2J′. +Then J(t) is the perturbed environment for our first kind of perturbation. It is a standard fact +that for any f ∈ L2(J), E(f(J(t))|J) = Ptf(J), where (Pt)t≥0 is the Ornstein–Uhlenbeck +semigroup (see, e.g., [17, Chapter 2 and Chapter 6]). Moreover, for each n ∈ NE, hn is an +eigenfunction of the Ornstein–Uhlenbeck generator, with eigenvalue +|n| := +� +e∈E +ne. +This implies that for any f ∈ L2(J), +Ptf(J) = +� +n∈NE +e−|n|t �f(n)hn(J). +In particular, by the Parseval identity, +E[(E(f(J(t))|J))2] = ∥Ptf(J)∥2 +L2 = +� +n∈NE +e−2|n|t �f(n)2. +(2.4) +Now consider the second kind of perturbation, where each Je is replaced by an independent +copy J′ +e with probability p. Let us again reparametrize p = 1 − e−t. Recall that h0 ≡ 1, and for +n ∈ N \ {0}, hn integrates to zero under the standard Gaussian measure on R. This implies that +for any n ∈ NE, +E(hn(J(t))|J) = (1 − p)δ(n)hn(J) = e−δ(n)thn(J), +where +δ(n) := |{e ∈ E : ne > 0}|. +Therefore, in this case, +E(f(J(t))|J) = +� +n∈NE +e−δ(n)t �f(n)hn(J) +and hence, +E[(E(f(J(t))|J))2] = +� +n∈NE +e−2δ(n)t �f(n)2. +(2.5) +Combining the above observations with Corollary 2.2, we get the following lemma. +11 + +Lemma 2.3. Let σ(t) be a ground state for the perturbed environment J(t), where the pertur- +bation is either of the two kinds described above. Then for any distinct i, j ∈ V ◦, +E[(E(σi(t)σj(t)|J))2] ≤ e−2t min{d(i,j),d(i,B)+d(j,B)}. +Proof. Since δ(n) ≤ |n|, the inequalities (2.4) and (2.5) shows that for either kind of perturba- +tion, +E[(E(σi(t)σj(t)|J))2] ≤ +� +n∈NE +e−2δ(n)t �φ(n)2. 
+By Corollary 2.2, we know that �φ(n) = 0 unless δ(n) ≥ min{d(i, j), d(i, B) + d(j, B)}. +Combining this with the above inequality completes the proof. +We are now ready to prove Theorem 1.1. +Proof of Theorem 1.1. We will work with the reparametrization p = 1 − e−t, and write R(t) +instead of R(p). Thus, by Lemma 2.3, +E(R(t)2) = +1 +|V ◦|2 +� +i,j∈V ◦ +E(σiσjσi(t)σj(t)) += +1 +|V ◦|2 +� +i,j∈V ◦ +E(σiσjE(σi(t)σj(t)|J)) +≤ +1 +|V ◦|2 +� +i,j∈V ◦ +E|E(σi(t)σj(t)|J)| +≤ +1 +|V ◦|2 +� +i,j∈V ◦ +� +E[(E(σi(t)σj(t)|J))2] +≤ +1 +|V ◦| + +1 +|V ◦|2 +� +i,j∈V ◦, +i̸=j +e−t min{d(i,j),d(i,B)+d(j,B)}. +For each k ∈ N, let +Nk := |{(i, j) : i, j ∈ V ◦, i ̸= j, k/t ≤ min{d(i, j), d(i, B) + d(j, B)} ≤ (k + 1)/t}|. +Then note that +� +i,j∈V ◦, +i̸=j +e−t min{d(i,j),d(i,B)+d(j,B)} ≤ +∞ +� +k=0 +Nke−k. +(2.6) +Since the left side is decreasing in t, it is not hard to see that it suffices to prove the theorem +under the assumption that t ∈ (0, 1). By the given conditions (and the assumption that t < 1), +we have that for any i ∈ V ◦ and k ∈ N, the number of j ∈ V ◦ such that d(i, j) ≤ (k + 1)/t is +at most α(k + 1)βt−β. Thus, the number of pairs (i, j) such that d(i, j) ≤ (k + 1)/t is at most +12 + +α|V ◦|(k + 1)βt−β. Next, note that the number of i such that d(i, B) ≤ (k + 1)/t is at most +|B|γ(k + 1)δt−δ. Thus, the number of pairs (i, j) such that d(i, B) + d(j, B) ≤ (k + 1)/t is at +most |B|2γ2(k + 1)2δt−2δ. Combining, we get +Nk ≤ α|V ◦|(k + 1)βt−β + |B|2γ2(k + 1)2δt−2δ. +Plugging this bound into (2.6), we get +� +i,j∈V ◦, +i̸=j +e−t min{d(i,j),d(i,B)+d(j,B)} ≤ C(|V ◦|t−β + |B|2t−2δ), +where C depends only on α, β, γ and δ. Since t = − log(1 − p), there is a constant C′ such that +t−1 ≤ C′p−1 for all p ∈ (0, 1 − e−1). This completes the proof of Theorem 1.1. +2.2 +Proof of Theorem 1.3 +Let us reparametrize p = 1−e−t, as before. Let σ′ := σ(t), in the notation of Lemma 2.3. Then +by Lemma 2.3, +|E(σiσjσ′ +iσ′ +j)| = |E(σiσjE(σ′ +iσ′ +j|J))| +≤ E|E(σ′ +iσ′ +j|J)| +≤ +� +E[(E(σ′ +iσ′ +j|J))2] +≤ e−t min{d(i,j),d(i,B)+d(j,B)}. +Since e−t = 1 − p, this completes the proof of Theorem 1.3. +2.3 +Proof of Theorem 1.4 +In the setting of Theorem 1.1, take +p = c max{|V ◦|−1/(β+1), (|B|/|V ◦|)2/(2δ+1)}, +where c is a positive constant, to be chosen later. By Theorem 1.1, +E(R(p)2) ≤ +1 +|V ◦| + C(|V ◦|p−β + |B|2p−2δ) +|V ◦|2 +≤ (1 + C)c−β−1pβ+1p−β + Cc−2δ−1p2δ+1p−2δ +≤ ((1 + C)c−β−1 + Cc−2δ−1)p. +Thus, we can choose c large enough, depending only on α, β, γ and δ, such that +E(R(p)2) ≤ p. +(2.7) +Notice that to ensure that p ∈ (0, 1), we need |V ◦| and |V ◦|/|B| large enough (depending +only on α, β, γ and δ). In fact, we can ensure that p ∈ (0, 1/2), and this is what we will assume +13 + +henceforth. It will be shown at the end of the proof that this assumption about |V ◦| and |V ◦|/|B| +can be dropped. +Let J(p) be the perturbed Hamiltonian, with the first kind of perturbation — that is, J(p) = +(1− p)J + +� +2p − p2J′, where J′ is an independent copy of J. Let σ(p) be the ground state for +the perturbed environment. Let A be the region where σ(p) disagrees with σ. Then note that +|A| = |{i ∈ V ◦ : σiσi(p) = −1}| += 1 +2(|{i ∈ V ◦ : σiσi(p) = −1}| + |V ◦| − |{i ∈ V ◦ : σiσi(p) = 1}|) += |V ◦| +2 +− |V ◦|R(p) +2 +. +Thus, by (2.7), +P(||A| − |V ◦|/2| > |V ◦|/4) = P(|R(p)| > 1/2) +≤ 4E(R(p)2) ≤ 4p. +(2.8) +Next, note that +σi(p)σj(p) = +� +−σiσj +if {i, j} ∈ ∂A, +σiσj +otherwise. +(2.9) +Thus, +HJ(σ(p)) − HJ(σ) = 2 +� +{i,j}∈∂A +Jijσiσj. 
+On the other hand, since σ(p) minimizes HJ(p), +HJ(p)(σ) − HJ(p)(σ(p)) ≥ 0. +But note that by equation (2.9), +HJ(p)(σ) − HJ(p)(σ(p)) = −2 +� +{i,j}∈∂A +Jij(p)σiσj += −2(1 − p) +� +{i,j}∈∂A +Jijσiσj − 2 +� +2p − p2 +� +{i,j}∈∂A +J′ +ijσiσj += −(1 − p)(HJ(σ(p)) − HJ(σ)) − 2 +� +2p − p2 +� +{i,j}∈∂A +J′ +ijσiσj. +Combining the last two displays, we get +HJ(σ(p)) − HJ(σ) ≤ −2 +� +2p − p2 +1 − p +� +{i,j}∈∂A +J′ +ijσiσj +≤ 2 +� +2p − p2 +1 − p +|∂A| max +{i,j}∈E |J′ +ij|. +14 + +But HJ(σ(p)) − HJ(σ) = ∆(A). Thus, +E +�∆(A) +|∂A| +� +≤ 2 +� +2p − p2 +1 − p +E( max +{i,j}∈E |J′ +ij|) ≤ 2 +� +2p − p2 +1 − p +� +4 log |E|, +(2.10) +where the last inequality follows from a standard fact about Gaussian random variables (see, +e.g., [17, Equation (A.3)]), and we interpret the ratio ∆(A)/|∂A| to be zero if A = ∅. +Now note that for any S ⊆ V ◦, +∆(S) ≤ 2|∂S| max +e∈E |Je|. +(2.11) +Combining this with (2.8), (2.10), and the fact that p < 1/2, we get +E(F) ≤ E +�∆(A) +|∂A| +� ++ 2E(1{||A|−|V ◦|/2|>|V ◦|/4} max +e∈E |Je|) +≤ C +� +p log |E| + 2 +� +P(||A| − |V ◦|/2| > |V ◦|/4)E(max +e∈E J2e ) +≤ C +� +p log |E| + C +� +pE(max +e∈E J2e ), +where C depends only on α, β, γ and δ. Again, by standard facts about Gaussian random +variables, it follows that +E(max +e∈E J2 +e ) = 4E +� +log +� +e∈E +eJ2 +e /4 +� +≤ 4 log +� +e∈E +E(eJ2 +e /4) = 4 log |E| + 4 log +√ +2 ≤ 8 log |E|, +where the last inequality holds because |E| ≥ 2. This completes the proof of Theorem 1.4 +under the assumption that |V ◦| and |V ◦|/|B| are larger than some constant depending on α, β, +γ and δ. To fully complete the proof, let us now show that this assumption can be dropped. +Note that by (2.11), F is always bounded above by 2 maxe∈E |Je|. Thus, we always have that +E(F) ≤ 2 +� +4 log |E|. This shows that by sufficiently increasing the constant C in the statement +of the theorem, we can have the required inequality hold without the largeness assumption on +|V ◦| and |V ◦|/|B|. +2.4 +Proof of Theorem 1.6 +Take any edge e = {i, j} ∈ ∂A. Let H1 (resp., H2) be the minimum energy of the system +subject to the constraints σi = σj (resp., σi = −σj) and Je = 0, keeping all other edge weights +intact. Then the ground state energy is min{−Je + H1, Je + H2}. Moreover, the ground state +satisfies σi = σj if −Je + H1 < Je + H2 and σi = −σj if −Je + H1 > Je + H2. (Note that +these are the only possibilities, since equality occurs with probability zero.) The conditions can +be rewritten as Je > (H1 − H2)/2 and Je < (H1 − H2)/2. Thus, if we change the value of Je, +the ground state does not change as long as the new value is on the same side of (H1 − H2)/2 +as the old one. +Let us say that two edges are “neighbors” of each other if they share one common endpoint. +It is not hard to see that |H1 − H2| is at most the sum of |Jf| over all edges f that are neighbors +15 + +of e. Let Se denote this sum. We will say that e is a “special edge” if Je > Se + 2. Note that if +e = {i, j} is special, then Je > 0 and σi = σj for the ground state σ. +It is easy to see that one can choose a subset K ⊆ ∂A such that no two edges in K are +neighbors of each other or have a common neighbor, and |K| ≥ c|∂A|, where c > 0 depends +only on the maximum degree of G. +We make two important observations about K. First, note that the events {Je > Se + 2}, +as e ranges over K, are independent. Thus, if X denotes the number of special edges in K, +then X is a sum of independent Bernoulli random variables. 
Moreover, it is not hard to see that +E(X) ≥ a|K| for some constant a > 0 depending only on the maximum degree of G. +Next, we claim that ∆(A) ≥ 2X. To see this, note that if we replace Je by Je − 1 for any +special edge e = {i, j} ∈ K, then the ground state does not change. But all other special edges +in K remain special even after this operation, since no two edges in K are neighbors of each +other. Thus, we can repeat this substitution successively for each special edge in K, keeping the +ground state unchanged. +Let σ denote the ground state in the environment J. Let J′ denote the new environment +obtained above, and σ′ denote the state obtained by overturning all the spins in A. Then by the +conclusion of the previous paragraph, HJ′(σ) ≤ HJ′(σ′). But note that σiσj = 1 for every +special edge {i, j}. Thus, +HJ′(σ′) − HJ′(σ) = 2 +� +{i,j}∈∂A +J′ +ijσiσj += 2 +� +{i,j}∈∂A +Jijσiσj − 2X += ∆(A) − 2X. +This proves that ∆(A) ≥ 2X. By the observations about X made above, it is now easy to +complete the proof (e.g., by Hoeffding’s concentration inequality). +2.5 +Proof of Theorem 1.7 +Consider the system perturbed by the second kind of perturbation, with parameter p. +Let +X be the number of edges where Je is replaced by an independent copy J′ +e. Then X is a +Binomial(|E|, p) random variable. A different way to cause the same perturbation is to first +generate X from the Binomial(|E|, p) distribution, and then pick X distinct edges at random +and replace the couplings by independent copies. Let e1, e2, . . . , eX denote these edges. Let +σ0 = σ be the original ground state, and σk be the ground state after replacing Je1, . . . , Jek by +independent copies. Let σ′ = σX be the ground state after completing the whole replacement +process. +Let R(p) denote the site overlap between σ and σ′. Note that +R(p)2 = +1 +|V ◦|2 +� +i,j∈V ◦ +σiσjσ′ +iσ′ +j = +2 +|V ◦|2 +� +i,j∈V ◦ +�1 +2 − 1{σiσj̸=σ′ +iσ′ +j} +� +, +16 + +which implies that +E(R(p)2) = +2 +|V ◦|2 +� +i,j∈V ◦ +�1 +2 − P(σiσj ̸= σ′ +iσ′ +j) +� +(2.12) +Now note that +P(σiσj ̸= σ′ +iσ′ +j|X) ≤ +X +� +k=1 +P(σk−1 +i +σk−1 +j +̸= σk +i σk +j |X). +(2.13) +Let �σ be the ground state after replacing the weight on one uniformly chosen edge by an inde- +pendent copy in the original system. Given X, σk−1 has the same law as σ for any 1 ≤ k ≤ X. +Given X and σk−1, ek is uniformly distributed on E. Thus, given X, (σk−1, σk) has the same +distribution as (σ, �σ). This shows that for any 1 ≤ k ≤ X, +P(σk−1 +i +σk−1 +j +̸= σk +i σk +j |X) = P(σiσj ̸= �σi�σj). +Plugging this into (2.13), we get +P(σiσj ̸= σ′ +iσ′ +j|X) ≤ XP(σiσj ̸= �σi�σj). +Taking expectation on both sides gives +P(σiσj ̸= σ′ +iσ′ +j) ≤ |E|pP(σiσj ̸= �σi�σj). +Combining this with (2.12), we get +� +i,j∈V ◦ +P(σiσj ̸= �σi�σj) ≥ +1 +|E|p +� +i,j∈V ◦ +P(σiσj ̸= σ′ +iσ′ +j) += |V ◦|2 +2|E|p(1 − E(R(p)2)). +Applying Theorem 1.1 to the right side gives +� +i,j∈V ◦ +P(σiσj ̸= �σi�σj) ≥ |V ◦|2 +2|E|p +� +1 − +1 +|V ◦| − C(|V ◦|p−β + |B|2p−2δ) +|V ◦|2 +� +. +Choosing p = c max{|V ◦|−1/β, (|B|/|V ◦|)1/δ} for some sufficiently large c (depending only +on α, β, γ and δ), and assuming that |V ◦| and |V ◦|/|B| are sufficiently large (again, depending +only on α, β, γ and δ), we can ensure that the term within the brackets on the right side above is +at least 1/2. Thus, if |V ◦| and |V ◦|/|B| are large enough, then +� +i,j∈V ◦ +P(σiσj ̸= �σi�σj) ≥ +C|V ◦|2 +|E| max{|V ◦|−1/β, (|B|/|V ◦|)1/δ}, +for some C > 0 that depends only on α, β, γ and δ. 
But the number of pairs (i, j) such that +σiσj ̸= �σi�σj is equal to (|V ◦| − |A|)|A|, where A is the set of sites where σ disagrees with �σ +(taking the smaller of two sets if B = ∅). Thus, +E|A| ≥ +1 +|V ◦|E[(|V ◦| − |A|)|A|] = +1 +|V ◦| +� +i,j∈V ◦ +P(σiσj ̸= �σi�σj). +Combining with the previous display completes the proof of Theorem 1.7. +17 + +References +[1] M. Aizenman and J. Wehr. +Rounding of first-order phase transitions in systems with +quenched disorder. Physical Review Letters, 62(21):2503, 1989. +[2] M. Aizenman and J. Wehr. Rounding effects of quenched randomness on first-order phase +transitions. Communications in Mathematical Physics, 130(3):489–528, 1990. +[3] L.-P. Arguin and M. Damron. On the number of ground states of the Edwards–Anderson +spin glass model. Annales de l’IHP Probabilit´es et Statistiques, 50(1):28–62, 2014. +[4] L.-P. Arguin and J. Hanson. On absence of disorder chaos for spin glasses on Zd. Electronic +Communications in Probability, 25:1–12, 2020. +[5] L.-P. Arguin, M. Damron, C. M. Newman, and D. L. Stein. Uniqueness of ground states +for short-range spin glasses in the half-plane. Communications in Mathematical Physics, +300(3):641–657, 2010. +[6] L.-P. Arguin, C. M. Newman, D. L. Stein, and J. Wehr. Fluctuation bounds for interface +free energies in spin glasses. Journal of Statistical Physics, 156(2):221–238, 2014. +[7] L.-P. Arguin, C. M. Newman, D. L. Stein, and J. Wehr. Zero-temperature fluctuations in +short-range spin glasses. Journal of Statistical Physics, 163(5):1069–1078, 2016. +[8] L.-P. Arguin, C. M. Newman, and D. L. Stein. A relation between disorder chaos and +incongruent states in spin glasses on Zd. Communications in Mathematical Physics, 367 +(3):1019–1043, 2019. +[9] L.-P. Arguin, C. M. Newman, and D. L. Stein. Ground state stability in two spin glass +models. In In and Out of Equilibrium 3: Celebrating Vladas Sidoravicius, pages 17–25. +Springer, 2021. +[10] A. Auffinger and W.-K. Chen. Universality of chaos and ultrametricity in mixed p-spin +models. Communications on Pure and Applied Mathematics, 69(11):2107–2130, 2016. +[11] G. Ben Arous, E. Subag, and O. Zeitouni. Geometry and temperature chaos in mixed +spherical spin glasses at low temperature: the perturbative regime. Communications on +Pure and Applied Mathematics, 73(8):1732–1828, 2020. +[12] N. Berger and R. J. Tessler. No percolation in low temperature spin glass. Electronic +Journal of Probability, 22:1–19, 2017. +[13] B. Bollob´as and I. Leader. Edge-isoperimetric inequalities in the grid. Combinatorica, 11 +(4):299–314, 1991. +[14] A. J. Bray and M. A. Moore. Critical behavior of the three-dimensional Ising spin glass. +Physical Review B, 31(1):631, 1985. +18 + +[15] A. J. Bray and M. A. Moore. Chaotic nature of the spin-glass phase. Physical Review +Letters, 58(1):57, 1987. +[16] S. Chatterjee. +Disorder chaos and multiple valleys in spin glasses. +arXiv preprint +arXiv:0907.3381, 2009. +[17] S. Chatterjee. Superconcentration and Related Topics. Springer, Cham, 2014. +[18] W.-K. Chen. Disorder chaos in the Sherrington–Kirkpatrick model with external field. +Annals of Probability, 41(5):3345–3391, 2013. +[19] W.-K. Chen. Chaos in the mixed even-spin models. Communications in Mathematical +Physics, 328(3):867–901, 2014. +[20] W.-K. Chen and D. Panchenko. Temperature chaos in some spherical mixed p-spin models. +Journal of Statistical Physics, 166(5):1151–1162, 2017. +[21] W.-K. Chen and D. Panchenko. Disorder chaos in some diluted spin glass models. 
Annals +of Applied Probability, 28(3):1356–1378, 2018. +[22] W.-K. Chen and A. Sen. Parisi formula, disorder chaos and fluctuation for the ground state +energy in the spherical mixed p-spin models. Communications in Mathematical Physics, +350(1):129–173, 2017. +[23] W.-K. Chen, H.-W. Hsieh, C.-R. Hwang, and Y.-C. Sheu. Disorder chaos in the spherical +mean-field model. Journal of Statistical Physics, 160(2):417–429, 2015. +[24] W.-K. Chen, P. Dey, and D. Panchenko. Fluctuations of the free energy in the mixed p-spin +models with external field. Probability Theory and Related Fields, 168(1):41–53, 2017. +[25] W.-K. Chen, M. Handschy, and G. Lerman. On the energy landscape of the mixed even +p-spin model. Probability Theory and Related Fields, 171(1):53–95, 2018. +[26] C. Cotar, B. Jahnel, and C. K¨ulske. Extremal decomposition for random Gibbs measures: +from general metastates to metastates on extremal random Gibbs measures. Electronic +Communications in Probability, 23:1–12, 2018. +[27] P. Dario, M. Harel, and R. Peled. Quantitative disorder effects in low-dimensional spin +systems. arXiv preprint arXiv:2101.01711, 2021. +[28] S. F. Edwards and P. W. Anderson. Theory of spin glasses. Journal of Physics F: Metal +Physics, 5(5):965, 1975. +[29] R. Eldan. A simple approach to chaos for p-spin models. Journal of Statistical Physics, +181(4):1266–1276, 2020. +[30] D. S. Fisher and D. A. Huse. Ordered phase of short-range Ising spin-glasses. Physical +Review Letters, 56(15):1601, 1986. +19 + +[31] D. S. Fisher and D. A. Huse. Equilibrium behavior of the spin-glass ordered phase. Physi- +cal Review B, 38(1):386, 1988. +[32] D. Gamarnik. The overlap gap property: A topological barrier to optimizing over random +structures. Proceedings of the National Academy of Sciences, 118(41):e2108492118, 2021. +[33] C. Garban and J. E. Steif. Noise sensitivity of Boolean functions and percolation. Cam- +bridge University Press, 2014. +[34] M. R. Garey and D. S. Johnson. Computers and Intractability: A guide to the theory of +NP-completeness. Freeman, San Francisco, CA, 1979. +[35] B. Huang and M. Sellke. Tight Lipschitz hardness for optimizing mean field spin glasses. +arXiv preprint arXiv:2110.07847, 2021. +[36] D. A. Huse and D. S. Fisher. Pure states in spin glasses. Journal of Physics A: Mathemat- +ical and General, 20(15):L997, 1987. +[37] Y. Imry and S.-k. Ma. Random-field instability of the ordered state of continuous symme- +try. Physical Review Letters, 35(21):1399, 1975. +[38] F. Krzakala and O. C. Martin. Spin and link overlaps in three-dimensional spin glasses. +Physical Review Letters, 85(14):3013, 2000. +[39] W. L. McMillan. Scaling theory of Ising spin glasses. Journal of Physics C: Solid State +Physics, 17(18):3179, 1984. +[40] M. M´ezard, G. Parisi, and M. A. Virasoro. Spin glass theory and beyond: An Introduc- +tion to the Replica Method and Its Applications, volume 9. World Scientific Publishing +Company, 1987. +[41] C. M. Newman and D. L. Stein. Multiple states and thermodynamic limits in short-ranged +Ising spin-glass models. Physical Review B, 46(2):973, 1992. +[42] C. M. Newman and D. L. Stein. Thermodynamic chaos and the structure of short-range +spin glasses. In Mathematical aspects of spin glasses and neural networks, pages 243–287. +Springer, 1998. +[43] C. M. Newman and D. L. Stein. Are there incongruent ground states in 2d Edwards– +Anderson spin glasses? Communications in Mathematical Physics, 224(1):205–218, 2001. +[44] C. M. Newman and D. L. Stein. 
+Ordering and broken symmetry in short-ranged spin +glasses. Journal of Physics: Condensed Matter, 15(32):R1319–R1364, 2003. +[45] M. Palassini and A. P. Young. Nature of the spin glass state. Physical Review Letters, 85 +(14):3017, 2000. +[46] D. Panchenko. The Sherrington–Kirkpatrick model. Springer Science & Business Media, +2013. +20 + +[47] D. Panchenko. Chaos in temperature in generic 2p-spin models. Communications in Math- +ematical Physics, 346(2):703–739, 2016. +[48] G. Parisi. Mean field theory of spin glasses: Statics and dynamics. In Complex Systems, +volume 85 of Les Houches, pages 131–178. Elsevier, 2007. +[49] D. Sherrington and S. Kirkpatrick. Solvable model of a spin-glass. Physical Review Letters, +35(26):1792, 1975. +[50] W.-K. Shih, S. Wu, and Y.-S. Kuo. Unifying maximum cut and minimum cut of a planar +graph. IEEE Transactions on Computers, 39(5):694–697, 1990. +[51] D. L. Stein. Frustration and fluctuations in systems with quenched disorder. In Pwa90: A +Lifetime of Emergence, pages 169–186. World Scientific, 2016. +[52] E. Subag. The geometry of the Gibbs measure of pure spherical spin glasses. Inventiones +Mathematicae, 210(1):135–209, 2017. +[53] M. Talagrand. Mean field models for spin glasses: Volume I: Basic examples. Springer +Science & Business Media, 2010. +[54] M. Talagrand. +Mean Field Models for Spin Glasses: Volume II: Advanced Replica- +Symmetry and Low Temperature. Springer Science & Business Media, 2011. +21 + diff --git a/e9E2T4oBgHgl3EQfxgi1/content/tmp_files/load_file.txt b/e9E2T4oBgHgl3EQfxgi1/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..cd867fd093d130992d48e071f2f36b56f22da19d --- /dev/null +++ b/e9E2T4oBgHgl3EQfxgi1/content/tmp_files/load_file.txt @@ -0,0 +1,866 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf,len=865 +page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='04112v1 [math-ph] 10 Jan 2023 Spin glass phase at zero temperature in the Edwards–Anderson model Sourav Chatterjee* Stanford University January 11, 2023 Abstract This article solves two open problems about the Edwards–Anderson model of short- range spin glasses (in all dimensions).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' First, it is shown that the ground state is sensitive to small perturbations of the disorder, in the sense that a small amount of noise gives rise to a new ground state that is nearly orthogonal to the old one with respect to the site over- lap inner product.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Second, it is shown that one can overturn a macroscopic fraction of the spins in the ground state with an energy cost that is negligible compared to the size of the boundary of the overturned region — a feature that is believed to be typical of spin glasses but clearly absent in ferromagnets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Together, these comprise the first mathematical proof of glassy behavior in a short-range spin glass model.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Key words and phrases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Edwards–Anderson model, disorder chaos, spin glass.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' 2020 Mathematics Subject Classification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' 82B44, 82D30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' 1 Introduction The Edwards–Anderson (EA) model was introduced in [28] as a realistic model of a spin glass in finite dimensions with short-range interactions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' In contrast to the Sherrington–Kirkpatrick (SK) model of spin glasses with mean-field interactions [49], which has been analyzed with tremendous success [46, 53, 54], the analysis of the EA model remains an elusive goal in both mathematics and physics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' In particular, one question that has remained beyond the reach of mathematical proof is whether the EA model indeed exhibits the physical characteristics of a true glassy material at low enough temperature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' But the question goes beyond the nitty-gritty of mathematical rigor;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' even physicists are not unanimous about the true nature of the EA model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' For more about this longstanding debate, see [14, 31, 38–40, 44, 45] and the references therein.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Mailing address: Department of Statistics, Stanford University, 390 Jane Stanford Way, Stanford, CA 94305, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Email: souravc@stanford.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='1 The model Let G be a finite, simple, connected graph with vertex set V and edge set E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Let J = (Je)e∈E be a collection of i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='i.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' random variables with a given law µ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' The Edwards–Anderson Hamiltonian on this graph in the environment (or disorder, or bond strengths, or edge weights) J is the random function HJ : {−1, 1}V → R defined as HJ(σ) := − � {i,j}∈E Jijσiσj.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' A ground state for this model is a state σ (depending on J) that minimizes the above Hamilto- nian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' If µ has no atoms, then it is not hard to show that with probability one, there are exactly two ground states σ and −σ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' What we have described above is the ground state under the free boundary condition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Some- times we impose a boundary condition, in the following way.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Let B be a nonempty subset of V and γ be a fixed element of {−1, 1}B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Then the ground state under boundary condition γ on the boundary B is the minimizer of HJ(σ) under the constraint that σi = γi for each i ∈ B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Again, it is not hard to show that under a boundary condition, there is a unique ground state with probability one if µ has no atoms, provided that V \\ B is a connected subset of V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' We will henceforth assume that V \\ B is connected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' To fix ideas, the reader can think of G as the cube {0, 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' , L}d in Zd, with the usual nearest-neighbor edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' In the absence of a boundary condition, we have the EA Hamiltonian with free boundary condition on this cube.' 
The usual boundary B in this setting is the set of vertices that forms the boundary of the cube (i.e., at least one coordinate is 0 or L). Alternatively, one can identify vertices belonging to opposite faces; the free boundary model in this case is what’s called the EA model on the cube with periodic boundary conditions. The EA model at inverse temperature β assigns a probability measure with mass proportional to e^{−βH(σ)} at each σ. The β = ∞ (zero temperature) model is just the probability measure that puts all its mass on the ground state (or the uniform distribution on the pair of ground states in the free boundary case). In this paper, we will only consider the zero temperature model. Also, throughout, we will take the disorder distribution µ to be the standard Gaussian distribution, although various parts of the proofs should work for quite general distributions. Incidentally, one of the difficulties in analyzing the ground state of the EA model is that finding the ground state is the same as finding the maximum cut in the weighted complete graph on V with edge weights (J_e)_{e∈E}. The maximum cut problem is NP-hard (for general graphs [34], although not for planar graphs [50]), which makes finding the ground state also NP-hard.
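One way to see the cut reformulation is to write σ_iσ_j = 1 − 2·1{σ_i ≠ σ_j}, which gives H_J(σ) = −Σ_{e∈E} J_e + 2·(total weight of the edges cut by σ); minimizing H_J is therefore the same as minimizing the signed weight of the cut, or equivalently maximizing the cut weight for the weights −J_e, which have the same law as J. The short check below verifies this identity on an arbitrary 5-edge example; it is an illustrative sketch, not taken from the paper.

import itertools
import numpy as np

rng = np.random.default_rng(2)
edges = [(0, 1), (1, 2), (2, 3), (3, 0), (0, 2)]
J = {e: rng.standard_normal() for e in edges}
total = sum(J.values())

for spins in itertools.product([-1, 1], repeat=4):
    H = -sum(J[(i, j)] * spins[i] * spins[j] for (i, j) in edges)
    cut_weight = sum(J[(i, j)] for (i, j) in edges if spins[i] != spins[j])
    # H_J(sigma) = -sum_e J_e + 2 * (weight of the cut induced by sigma)
    assert abs(H - (-total + 2 * cut_weight)) < 1e-9
print("identity verified on all 16 configurations")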
1.2 Results

Our first main result is that the ground state of the EA model with standard Gaussian disorder is sensitive to small changes in the disorder J, a phenomenon that is sometimes called “disorder chaos”. We consider two kinds of perturbations, both determined by a parameter p ∈ (0, 1). In the first kind of perturbation, we replace each J_e by (1 − p)J_e + \sqrt{2p − p^2}\, J′_e, where J′ = (J′_e)_{e∈E} is another set of i.i.d. standard Gaussian random variables, independent of J. The coefficients in front of J_e and J′_e are chosen to ensure that the linear combination is again a standard Gaussian random variable. In the second kind of perturbation, each J_e is replaced by J′_e with probability p, independently of each other. Let V◦ := V \ B denote the set of “interior vertices” of V. Note that V◦ = V when B = ∅ (the case of free boundary). We have already assumed earlier that V◦ is connected. To avoid trivialities, we will assume that V◦ is nonempty and |E| ≥ 2. Let σ be the ground state in the original environment and σ′ be the ground state in the perturbed environment. The “site overlap” between the two configurations is defined as
\[
R(p) := \frac{1}{|V^\circ|} \sum_{i\in V^\circ} \sigma_i\sigma'_i.
\]
If B = ∅ (i.e., for the free boundary condition), R(p) is not well-defined since there are two ground states in both environments. But R(p)^2 is still well-defined, and that is sufficient for our purposes. Note that R(p) is close to zero if and only if σ and σ′ are nearly orthogonal to each other — or in other words, σ and σ′ disagree on approximately half the vertices.
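Both perturbations are easy to simulate. The sketch below (illustrative only; the number of edges and the value of p are arbitrary) constructs the two perturbed environments from a disorder vector, with a comment recording the variance identity (1 − p)^2 + (2p − p^2) = 1 that keeps the first perturbation marginally standard Gaussian, and defines the site overlap as the average of σ_iσ′_i.

import numpy as np

rng = np.random.default_rng(3)
p = 0.1
m = 100                              # number of edges (arbitrary)
J = rng.standard_normal(m)           # original disorder
Jp = rng.standard_normal(m)          # independent copy J'

# First kind of perturbation: marginally standard Gaussian again, since
# (1 - p)**2 + (2*p - p**2) == 1.
J1 = (1 - p) * J + np.sqrt(2 * p - p**2) * Jp

# Second kind of perturbation: resample each coupling independently with probability p.
resample = rng.random(m) < p
J2 = np.where(resample, Jp, J)

def site_overlap(sigma, sigma_prime):
    """R(p): average of sigma_i * sigma_prime_i over the interior vertices."""
    return np.mean(sigma * sigma_prime)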
The following theorem shows that under certain conditions, R(p) ≈ 0 with high probability for a tiny value of p, which is what’s commonly known as disorder chaos for the site overlap. We first state the result for a general graph G, and then specialize to the case of a cube in Z^d in the corollary that follows.

Theorem 1.1. Let all notations be as above. Let d denote the graph distance on G. Suppose that there are positive constants α, β, γ and δ such that for any i ∈ V◦ and r ≥ 1, the number of j such that d(i, j) ≤ r is at most αr^β, and the number of j such that min{d(j, k) : k ∈ B} ≤ r is at most γ|B|r^δ. Then for both kinds of perturbations, we have that for any p ∈ (0, 1),
\[
E(R(p)^2) \le \frac{1}{|V^\circ|} + \frac{C\bigl(|V^\circ|\, p^{-\beta} + |B|^2 p^{-2\delta}\bigr)}{|V^\circ|^2},
\]
where C is a constant depending only on α, β, γ and δ.

Let us now check what this yields for V = {0, 1, . . . , L}^d with the usual boundary, for some dimension d ≥ 1 (not to be confused with the graph distance d). In this case, |V◦| is of order L^d, |B| is of order L^{d−1}, β = d, and δ = 1. Thus, we get the following corollary.

Corollary 1.2. If V = {0, 1, . . . , L}^d with the usual boundary and with any given disorder-independent boundary condition, then for both kinds of perturbations, we have that for all p ∈ (0, 1),
\[
E(R(p)^2) \le
\begin{cases}
C(d) L^{-1} p^{-1} & \text{if } d = 1,\\
C(d) L^{-2} p^{-2} & \text{if } d \ge 2,
\end{cases}
\]
where C(d) depends only on d. For free or periodic boundary, the bound is
\[
E(R(p)^2) \le \frac{C(d)}{L^d p^d}
\]
for all d ≥ 1.

This shows that R(p) ≈ 0 with high probability whenever p ≫ L^{−1}. In other words, if p ≫ L^{−1}, σ and σ′ disagree at approximately half the sites. This is a mathematical proof of the conjecture (made 35 years ago in [15], with heuristic justification) that the ground state of the EA model is chaotic under small perturbations of the disorder. It is not clear if the threshold L^{−1} can be improved. Simulations suggest that improvements may be possible [15].
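For intuition only, E(R(p)^2) can be estimated by direct simulation on very small systems. The sketch below does this for a 3 × 3 grid with free boundary, brute-force ground states and the first kind of perturbation; the grid size, sample count and values of p are arbitrary, and at such tiny sizes the asymptotic behaviour of Corollary 1.2 should not be expected to be visible. The point is only to show how R(p)^2 is assembled from the two ground states.

import itertools
import numpy as np

rng = np.random.default_rng(4)
L = 3                                              # 3 x 3 grid, free boundary
sites = list(itertools.product(range(L), repeat=2))
idx = {s: k for k, s in enumerate(sites)}
edges = [(idx[(x, y)], idx[(x + 1, y)]) for x, y in sites if x + 1 < L] + \
        [(idx[(x, y)], idx[(x, y + 1)]) for x, y in sites if y + 1 < L]
configs = np.array(list(itertools.product([-1, 1], repeat=len(sites))))

def ground_state(J):
    energies = -sum(J[k] * configs[:, i] * configs[:, j] for k, (i, j) in enumerate(edges))
    return configs[np.argmin(energies)]

def mean_R2(p, n_samples=200):
    total = 0.0
    for _ in range(n_samples):
        J = rng.standard_normal(len(edges))
        Jp = (1 - p) * J + np.sqrt(2 * p - p**2) * rng.standard_normal(len(edges))
        R = np.mean(ground_state(J) * ground_state(Jp))   # site overlap R(p)
        total += R ** 2
    return total / n_samples

for p in (0.05, 0.2, 0.8):
    print(p, round(mean_R2(p), 3))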
The proof of Theorem 1.1 also yields the following result, which justifies the claim made in [15] that the glassy nature of the EA model at zero temperature is characterized by a chaotic phase in which the “relative orientations of spins with large separations are sensitive to small changes in the bond strengths”.

Theorem 1.3. In the setting of Theorem 1.1, take any p ∈ (0, 1), and let σ and σ′ be the ground states of the unperturbed system and the system with perturbation parameter p (for either kind of perturbation), respectively. Then for any i, j ∈ V◦,
\[
|E(\sigma_i\sigma_j\sigma'_i\sigma'_j)| \le (1-p)^{\min\{d(i,j),\, d(i,B)+d(j,B)\}},
\]
where d(i, B) := min{d(i, k) : k ∈ B} (defined to be infinity if B = ∅).

This theorem shows that if i and j are two vertices such that d(i, j), d(i, B) and d(j, B) are all much greater than p^{−1}, then the relative orientations of the spins at i and j in the original and the perturbed environments are approximately independent of each other (since marginally, both σ_iσ_j and σ′_iσ′_j are uniformly distributed on {−1, 1}). Notice the contrast between the EA model and any ferromagnetic model — even one with random bonds — in Theorems 1.1 and 1.3. In a ferromagnetic model, a small perturbation of the environment does not change the ground state at all, whereas in the EA model, a small perturbation causes such a large change that the original and perturbed ground states are almost orthogonal to each other. Our next result gives another such contrast between ferromagnets and the EA model, also already known to physicists. In ferromagnets, if a region of spins in the ground state is overturned, the energy cost is proportional to the size of the boundary of the overturned region. In the EA model, it is expected that there are macroscopic regions which can be overturned with energy cost that is negligible compared to the size of the boundary of the overturned region. (In fact, this belief is central to the “droplet theory” of the EA model [31], and forms the basis of the heuristic justification of chaos in [15].) To fix a convention, we will only look at subsets of V◦ whose sizes are between |V◦|/4 and 3|V◦|/4. Given a region A ⊂ V◦, let ∆(A) denote the energy cost of overturning all spins in A in the ground state. We are interested in showing that there is some set A with |V◦|/4 ≤ |A| ≤ 3|V◦|/4 such that the ratio ∆(A)/|∂A| is small, where ∂A is the edge-boundary of A — that is, the set of all edges from A to V \ A.
(If ∂A = ∅, then ∆(A) = 0, and we then define this ratio to be zero.) To do this, let us define
\[
F := \min\Bigl\{\frac{\Delta(A)}{|\partial A|} : A \subseteq V^\circ,\ \frac{|V^\circ|}{4} \le |A| \le \frac{3|V^\circ|}{4}\Bigr\}.
\]
The following result shows that F is small with high probability whenever |V◦| and |V◦|/|B| are larger than some power of log |E|. As before, we first state the general result, and then specialize to the case of V = {0, 1, . . . , L}^d in the corollary that follows.

Theorem 1.4. Let all notations be as in Theorem 1.1, and let F be defined as above. Then
\[
E(F) \le C \max\bigl\{|V^\circ|^{-1/(2\beta+2)},\ (|B|/|V^\circ|)^{1/(2\delta+1)}\bigr\}\,\sqrt{\log |E|},
\]
where C is a constant that depends only on α, β, γ and δ.
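Computing ∆(A) for a given region is straightforward, since overturning all spins in A only changes the contribution of the boundary edges. The helper below is an illustrative sketch (it assumes a ground state sigma has already been computed, for instance by the brute-force routine sketched earlier, and all names are arbitrary); it returns the interface energy and the boundary size, so that the ratio ∆(A)/|∂A| can be inspected for chosen regions A. The quantity F minimizes this ratio over exponentially many regions, which the sketch makes no attempt to do.

import numpy as np

def interface_energy(edges, J, sigma, A):
    """Energy cost Delta(A) of overturning all ground-state spins in A,
    together with the size of the edge boundary |dA| of A."""
    boundary = [(i, j) for (i, j) in edges if (i in A) != (j in A)]
    # Flipping the spins in A only flips sigma_i * sigma_j on boundary edges,
    # so Delta(A) = H_J(sigma^A) - H_J(sigma) = 2 * sum_{e in dA} J_e sigma_i sigma_j.
    delta = 2.0 * sum(J[(i, j)] * sigma[i] * sigma[j] for (i, j) in boundary)
    return delta, len(boundary)

# Hypothetical usage: for a ground state `sigma` of the 4-cycle example above,
# interface_energy(edges, J, sigma, A={0, 1}) gives Delta(A) >= 0 and |dA| = 2.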
Recall that if V = {0, 1, . . . , L}^d with the usual boundary (or free or periodic boundary), then |V◦| is of order L^d, |B| is of order L^{d−1}, β = d, and δ = 1. Additionally, note that |E| is of order L^d. Thus, we get the following corollary.

Corollary 1.5. In the setting of Theorem 1.4, if V = {0, 1, . . . , L}^d with the usual boundary and with any given disorder-independent boundary condition, then
\[
E(F) \le
\begin{cases}
C(d) L^{-1/4}\sqrt{\log L} & \text{if } d = 1,\\
C(d) L^{-1/3}\sqrt{\log L} & \text{if } d \ge 2,
\end{cases}
\]
where C(d) depends only on d. For free or periodic boundary, the bound is
\[
E(F) \le C(d) L^{-d/(2d+2)}\sqrt{\log L}
\]
for all d ≥ 1.

It is not hard to show that for d = 1, the bound obtained above is suboptimal. This is because with high probability we can find two edges e and f that are order L apart, where J_e and J_f are both of order L^{−1}. Overturning all spins between e and f creates an overturned region whose size is of order L, but the energy cost is only of order L^{−1}. Thus, the correct order of E(F) in d = 1 is L^{−1}. Presumably, the bound given by Corollary 1.5 may be suboptimal for all d, but that is not clear. Nor is it clear what the correct order should be for d ≥ 2. The physics literature is not unanimous about the size of F. For example, there are competing claims, made via numerical studies, that in d = 3, the energy cost ∆(A) can be as small as O(L^{1/5}) [15], or O(1) [38].
Note that for a macroscopic region A, |∂A| is at least of order L^2 in d = 3. The main difficulty with simulation studies is that finding the ground state is an NP-hard problem, with no good algorithm even for the “average case”. Simulations can be carried out with only rather small values of L (e.g., L = 12 in [38]). As a counterpart of Theorem 1.4, we now show that large regions with small interface energies, whose existence is guaranteed by Theorem 1.4, are actually exceptionally rare. The probability that any given region has a small interface energy is exponentially small in the size of the boundary. This is the content of the next theorem.

Theorem 1.6. In the setting of Theorem 1.1, there are positive constants C1, C2 and C3 depending only on the maximum degree of G, such that for any A ⊂ V◦,
\[
P\Bigl(\frac{\Delta(A)}{|\partial A|} < C_1\Bigr) \le C_2\, e^{-C_3 |\partial A|}.
\]
Our final result concerns the size of the so-called “critical droplet” of an edge, an object that has attracted some recent attention [9]. This is defined as follows. Take any edge e = {i, j}.
Let σ1 be the energy minimizing configuration under the constraint that σ_i = σ_j, and let σ2 be the energy minimizing configuration under the constraint that σ_i = −σ_j. It is easy to see that σ1 and σ2 do not depend on the value of J_e, and for any value of J_e (keeping all others fixed), the ground state of the system is either σ1 or σ2. The critical droplet is the set of sites where σ1 and σ2 disagree. Under the free boundary condition on G, this is not completely well-defined, because if a set A fits the above definition, then so does V \ A. In this case we define the size of the critical droplet (which is our main object of interest) as the minimum of |A| and |V \ A|. Let D(e) be the critical droplet of an edge e, in the setting of Theorem 1.1. The following theorem gives a lower bound on the expected value of the size of D(e).

Theorem 1.7. Let D(e) be as above. Then, in the setting of Theorem 1.1,
\[
\frac{1}{|E|} \sum_{e\in E} E|D(e)| \ge \frac{C\,|V^\circ|}{|E|\,\max\bigl\{|V^\circ|^{-1/\beta},\ (|B|/|V^\circ|)^{1/\delta}\bigr\}},
\]
where C is a positive constant depending only on α, β, γ and δ.
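On very small graphs the two constrained minimizers, and hence the critical droplet, can be computed by brute force. The sketch below (illustrative only, not the paper's method; function and variable names are arbitrary) minimizes the Hamiltonian under each of the two constraints on σ_iσ_j and reports the droplet size with the free-boundary convention min{|A|, |V \ A|}.

import itertools
import numpy as np

def constrained_minimum(edges, J, n, i, j, same_sign):
    """Minimize H_J over {-1,1}^n subject to sigma_i = sigma_j (same_sign=True)
    or sigma_i = -sigma_j (same_sign=False)."""
    best, best_energy = None, np.inf
    for spins in itertools.product([-1, 1], repeat=n):
        if (spins[i] == spins[j]) != same_sign:
            continue
        energy = -sum(J[(a, b)] * spins[a] * spins[b] for (a, b) in edges)
        if energy < best_energy:
            best, best_energy = np.array(spins), energy
    return best

def critical_droplet_size(edges, J, n, e):
    i, j = e
    s1 = constrained_minimum(edges, J, n, i, j, True)
    s2 = constrained_minimum(edges, J, n, i, j, False)
    disagree = int(np.sum(s1 != s2))
    # Under free boundary each minimizer is only defined up to a global flip, so the
    # droplet is defined up to complementation; take the smaller of the two sizes.
    return min(disagree, n - disagree)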
Specializing to the case V = {0, 1, . . . , L}^d with the usual boundary (or with free or periodic boundary), where |V◦| and |E| are of order L^d, |B| is of order L^{d−1}, β = d and δ = 1, we obtain the following corollary.

Corollary 1.8. If V = {0, 1, . . . , L}^d with the usual boundary and with any given disorder-independent boundary condition (or with free or periodic boundary), then
\[
\frac{1}{|E|} \sum_{e\in E} E|D(e)| \ge C(d)\, L
\]
for all d ≥ 1, where C(d) is a positive constant depending only on d. In particular, under the periodic boundary condition on V = {0, 1, . . . , L}^d, E|D(e)| ≥ C(d)L for any e.

This has the following consequence for d ≥ 2. Since |D(e)| ≤ |V|/2 = L^d/2 (due to the periodic boundary condition), an isoperimetric inequality of Bollobás and Leader [13, Theorem 8] implies that
\[
|\partial D(e)| \ge \min_{1\le r\le d} 2|D(e)|^{1-1/r}\, r\, L^{(d/r)-1} \ge \min_{1\le r\le d} 2|D(e)|^{1-1/r}\, r\, (2|D(e)|)^{((d/r)-1)/d} \ge 2|D(e)|^{1-1/d},
\]
where the middle inequality uses that L ≥ (2|D(e)|)^{1/d} and d/r ≥ 1. Thus, we get the following corollary.
Corollary 1.9. Take any d ≥ 2. For V = {0, 1, . . . , L}^d with periodic boundary condition, we have that for any edge e,
\[
E\,|\partial D(e)|^{d/(d-1)} \ge C(d)\, L,
\]
where C(d) is a positive constant depending only on d.

Note that ∂D(e) is the set of edge-spins that are overturned when J_e is replaced by an independent copy, where we define the spin associated with an edge to be the product of the spins at its endpoints. This indicates (but does not prove) that edge-spins are also chaotic with respect to small perturbations of the disorder. What it does prove is that for an edge e = {i, j}, the dependence of σ_iσ_j on J_f can decrease at most polynomially in the distance between e and f (if it decays at all). This is in contrast to the one-dimensional situation, where the decay is exponential, as can be verified by explicitly writing down the ground state as a function of the disorder and the boundary condition.

1.3 Related literature and open problems

The phenomenon of chaos in lattice spin glasses was proposed in the physics literature by Fisher and Huse [30] and Bray and Moore [15]. Disorder chaos for the SK model was proved in [16, 17]. In [16], it was also shown that the bond overlap in the EA model is not chaotic, in the sense that its value does not drop to zero under a small perturbation. This still leaves open the possibility that it drops to a nonzero value strictly less than the value at zero perturbation, which is the more nuanced definition of chaos for the bond overlap. Further investigations of disorder chaos in the mean-field setting were carried out in [10, 18, 19, 21–25, 29]. The related notion of temperature chaos in mean-field models was investigated in [11, 20, 47, 52]. Connections with computational complexity were explored in [32, 35], and with noise sensitivity in [33].
In the lattice setting, there are fewer results, reflecting the general dearth of rigorous results for short-range models. The absence of disorder chaos in the bond overlap of the EA model, in the narrow sense that was proved in [16, 17], has been recently generalized by Arguin and Hanson [4]. A very interesting connection between disorder chaos in the bond overlap and the presence of incongruent states (explained below) was proved by Arguin, Newman, and Stein [8], who showed that if there is no disorder chaos (in the stronger sense), then incongruent ground states cannot exist, at least as limits of finite volume ground states with disorder-independent boundary conditions. The problem of incongruent ground states is one of the central open problems for short-range spin glasses. The problem is stated most clearly in the infinite volume setting. Consider the EA Hamiltonian on the whole of Z^d instead of a finite region. The notion of “minimizing the energy” no longer makes sense, but the difference between the energies of two states that differ only at a finite number of sites is well-defined and finite. An infinite volume state is called a ground state if overturning any finite number of spins results in an increase in the energy. It was shown by Newman and Stein [41] that the number of ground states is almost surely equal to a constant depending on the dimension and the distribution of the disorder. The main question is whether this number is greater than two in some dimension and for some symmetric and continuous distribution of the disorder. Obviously, if σ is an infinite volume ground state, then so is −σ, and so the number of ground states is at least two. Two ground states that are not related in this way are called incongruent ground states.
The above question is the same as asking whether there can exist a pair of incongruent ground states. This question remains unanswered. The greatest progress on this topic was made by Newman and Stein [43], who showed that in dimension two, if there is a pair of incongruent ground states, then there is a single doubly infinite “domain wall” dividing them. This result was used by Arguin, Damron, Newman, and Stein [5] to prove that there is a unique infinite volume ground state in the EA model on the half-plane Z × {0, 1, . . .} under a certain sequence of boundary conditions. The boundary condition was later eliminated by Arguin and Damron [3], who showed the number of ground state pairs for the EA model on the half-plane is either 1 or ∞. A related result by Berger and Tessler [12] shows that for the ground state of the EA model on Z^2, “unsatisfied edges” (i.e., where σ_iσ_j ≠ sign(J_ij)) do not percolate. The absence of incongruent infinite volume ground states, if true, will have the following consequence in finite volume. Any two states that are nearly energy-minimizing will locally look like a pair of congruent states (i.e., either equal or negations of each other), although they may be globally quite different. The “almost orthogonal” states produced by the small perturbations of the disorder in Theorem 1.1 may (or may not) have this property.
In the physics literature, this is known as “regional congruence” [36]. An important contribution to the study of the EA model from the mathematical literature is the concept of metastates, introduced by Aizenman and Wehr [1, 2]. A zero-temperature metastate is a measurable map taking the disorder in infinite volume to a probability measure on the set of ground states. Aizenman and Wehr [1, 2] showed that metastates exist, and an explicit construction and interpretation was given later by Newman and Stein [42]. Metastates capture some aspects of the chaotic nature of spin glasses, such as the “chaotic size dependence” proved in [41], which means that the ground state in a finite region is chaotic with respect to changes in the size of the region. For some recent results and different perspectives on metastates, see [26]. Another topic that has received considerable attention in the mathematical literature is the question of fluctuations of the ground state energy (and more generally, the free energy at any temperature). This began with the aforementioned papers of Aizenman and Wehr [1, 2], who showed that the fluctuations are of the same order as the volume of the system. The motivation for studying fluctuations is that one can connect it to the question of phase transitions via the “Imry–Ma argument” [37]. This was made precise in [1, 2]. For further developments in the study of fluctuations, and especially the important topic of interface energy fluctuations, we refer to [6, 7, 27, 51] and references therein. One of the great unsolved questions in spin glass theory concerns the validity of the “Parisi picture” [48] versus the “droplet theory” of Fisher and Huse [30].
Conclusively settling this controversy has remained out of the reach of rigorous mathematics until this day. Theorem 1.4 and Corollary 1.5 in the present paper are related to this problem. As explained nicely in Krzakala and Martin [38], the Parisi picture implies that one can overturn all spins in a macroscopic subset of {0, 1, . . . , L}^3 with O(1) energy cost, whereas the droplet theory implies that the minimum cost grows as a small positive power of L. While Corollary 1.5 does not settle this debate, it is the first result to show that one can indeed find macroscopic regions with interface energies that are negligible compared to the size of the interface. A related question is the following. Let σ and σ′ be the ground states in the original and perturbed environments, as in Theorem 1.3. Then Theorem 1.3 shows that E(σ_iσ_jσ′_iσ′_j) drops sharply to zero as the perturbation parameter p increases from 0 to a small positive value, if i and j are far apart. Now suppose i and j are neighbors. Then one can show that E(σ_iσ_jσ′_iσ′_j) will not drop to zero — but does it drop sharply to a value less than 1?
More precisely, is it true that under the periodic boundary condition, for neighboring i and j,
\[
\lim_{p\to 0}\ \lim_{L\to\infty}\ |E(\sigma_i\sigma_j\sigma'_i\sigma'_j)| < 1?
\]
This, too, would settle the debate between the Parisi picture versus the droplet theory. The Parisi picture holds if the above is true; the droplet theory, if not. Moreover, it would settle the longstanding question about the existence of incongruent ground states. Incongruent ground states exist if and only if the above inequality holds.

Acknowledgements

I thank Louis-Pierre Arguin for helpful comments. This work was partially supported by NSF grants DMS-2113242 and DMS-2153654.

2 Proofs

2.1 Proof of Theorem 1.1

Let h_0, h_1, . . . be the orthonormal basis of normalized Hermite polynomials for L^2(µ), where µ is the standard Gaussian distribution on R and h_0 ≡ 1. Then an orthonormal basis of L^2(J) is formed by products like
\[
h_n(J) := \prod_{e\in E} h_{n_e}(J_e),
\]
where n_e ∈ N := {0, 1, . . .} for each e, and n := (n_e)_{e∈E} ∈ N^E.
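As a numerical aside (not part of the paper, and assuming NumPy is available), the normalized Hermite polynomials used here are h_n = He_n/√(n!), where He_n are the probabilists' Hermite polynomials; the sketch below checks their orthonormality under the standard Gaussian by Monte Carlo.

import numpy as np
from numpy.polynomial.hermite_e import hermeval
from math import factorial

rng = np.random.default_rng(0)
z = rng.standard_normal(500_000)

def h(n, x):
    """Normalized probabilists' Hermite polynomial: He_n(x) / sqrt(n!)."""
    coef = np.zeros(n + 1)
    coef[n] = 1.0
    return hermeval(x, coef) / np.sqrt(factorial(n))

# Monte Carlo check that E[h_m(Z) h_n(Z)] is approximately delta_{mn} for standard Gaussian Z.
for m in range(4):
    row = [round(float(np.mean(h(m, z) * h(n, z))), 3) for n in range(4)]
    print(row)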
Any square-integrable function f(J) of the disorder J can be expanded in this basis as
\[
f(J) = \sum_{n\in N^E} \hat{f}(n)\, h_n(J), \qquad (2.1)
\]
where f̂(n) := E(f(J)h_n(J)). The infinite series on the right side in (2.1) should be interpreted as the L^2-limit of partial sums, where the order of summation is irrelevant. Now take any distinct i, j ∈ V◦. Let σ be a ground state in the EA model on G (with or without a boundary condition). Consider σ_iσ_j as a function φ(J) of the disorder J. This function is well-defined even if we do not impose a boundary condition. Obviously, it is in L^2(J). For any n ∈ N^E, let E(n) be the set of edges e ∈ E such that n_e > 0, and V(n) be the set of vertices that are endpoints of the edges in E(n). Then G(n) := (V(n), E(n)) is a subgraph of G. The following lemma is the main ingredient for the proof of Theorem 1.1.

Lemma 2.1. Let all notations be as above. Then φ̂(n) = 0 unless both i and j are in V(n) and the connected components of G(n) containing i and j are either the same, or both intersect B.
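To see what the lemma says in a toy case, consider a triangle with free boundary and take i = 0, j = 1. For n supported on the single edge {1, 2}, the vertex 0 is not in V(n), so the coefficient φ̂(n) = E(φ(J)J_{12}) must vanish, while for n supported on the edge {0, 1} both endpoints lie in V(n) and the coefficient E(φ(J)J_{01}) need not vanish. The Monte Carlo sketch below (illustrative only, with arbitrary sample size; it is not part of the proof) checks this numerically.

import itertools
import numpy as np

rng = np.random.default_rng(5)
edges = [(0, 1), (1, 2), (0, 2)]                   # triangle, free boundary, i = 0, j = 1
configs = np.array(list(itertools.product([-1, 1], repeat=3)))

def phi(J):
    """sigma_0 * sigma_1 in a ground state (invariant under the global spin flip)."""
    energies = -sum(J[k] * configs[:, a] * configs[:, b] for k, (a, b) in enumerate(edges))
    gs = configs[np.argmin(energies)]
    return gs[0] * gs[1]

N = 100_000
samples = rng.standard_normal((N, 3))
vals = np.array([phi(J) for J in samples])
print(np.mean(vals * samples[:, 1]))   # coefficient for n on edge {1, 2} only: close to 0
print(np.mean(vals * samples[:, 0]))   # coefficient for n on edge {0, 1} only: clearly nonzero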
First, suppose that $i \in V(n)$. Let $A$ be the connected component of $G(n)$ that contains $i$. Suppose that $A \cap (B \cup \{j\}) = \emptyset$. Let $\partial A$ be the set of edges from $A$ to $A^c := V \setminus A$. Since $A$ is a connected component of $G(n)$, no edge in $\partial A$ can be a member of $E(n)$. Define a new environment $J'$ as
\[
J'_e =
\begin{cases}
-J_e & \text{if } e \in \partial A,\\
J_e & \text{if } e \notin \partial A.
\end{cases}
\]
Note that $J$ and $J'$ have the same law, since the disorder distribution is symmetric around zero and the disorder variables are independent. Since $\partial A \cap E(n) = \emptyset$, $h_n(J)$ does not depend on $(J_e)_{e\in\partial A}$. Thus,
\[
\hat{\phi}(n) = E(\phi(J)h_n(J)) = E\bigl(E(\phi(J)\mid (J_e)_{e\notin\partial A})\, h_n(J)\bigr). \tag{2.2}
\]
But note that since $J$ and $J'$ have the same law, and $J'_e = J_e$ for $e \notin \partial A$,
\[
E(\phi(J)\mid (J_e)_{e\notin\partial A}) = E(\phi(J')\mid (J_e)_{e\notin\partial A}). \tag{2.3}
\]
Now, let $\sigma'$ be the configuration defined as
\[
\sigma'_k =
\begin{cases}
-\sigma_k & \text{if } k \in A,\\
\sigma_k & \text{if } k \notin A.
\end{cases}
\]
Then $\sigma'$ satisfies the given boundary condition (if any) since $A \cap B = \emptyset$. Let us now split $H_{J'}(\sigma')$ as
\[
H_{J'}(\sigma') = -\sum_{\substack{\{k,l\}\in E,\\ k,l\in A}} J_{kl}(-\sigma_k)(-\sigma_l) \;-\; \sum_{\{k,l\}\in\partial A} (-J_{kl})(-\sigma_k\sigma_l) \;-\; \sum_{\substack{\{k,l\}\in E,\\ k,l\in A^c}} J_{kl}\sigma_k\sigma_l = -\sum_{\{k,l\}\in E} J_{kl}\sigma_k\sigma_l = H_J(\sigma).
\]
Moreover, for any $\tau \in \{-1,1\}^V$ satisfying the given boundary condition (if any), $H_{J'}(\tau) = H_J(\tau')$, where $\tau'_i = -\tau_i$ if $i \in A$ and $\tau'_i = \tau_i$ if $i \notin A$.
Since $\tau'$ also satisfies the given boundary condition, this shows that $\sigma'$ minimizes $H_{J'}$, and so $\sigma'_i\sigma'_j = \phi(J')$. But since $j \notin A$ and $i \in A$, $\sigma'_i\sigma'_j = -\sigma_i\sigma_j$. Thus, $\phi(J') = -\phi(J)$, and so, by (2.3), $E(\phi(J)\mid (J_e)_{e\notin\partial A}) = 0$. Plugging this into (2.2), we get that $\hat{\phi}(n) = 0$ if $i \in V(n)$ and $A$ does not intersect $B \cup \{j\}$. Next, suppose that $i \notin V(n)$. In this case, taking $A := \{i\}$ and repeating the whole argument as above shows that $\hat{\phi}(n) = 0$. Thus, $\hat{\phi}(n) = 0$ unless $i \in V(n)$ and $A$ intersects $B \cup \{j\}$. By the symmetry between $i$ and $j$, we conclude that $\hat{\phi}(n) = 0$ unless $j \in V(n)$ and the component of $G(n)$ containing $j$ intersects $B \cup \{i\}$. Combining these two conclusions yields the claim of the lemma.
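The coupling-and-spin flip used in the proof above, namely that flipping $J_e$ on $\partial A$ and the spins on $A$ leaves every energy unchanged, is easy to check numerically. The sketch below (an illustrative sanity check, not part of the proof) does this on a small 4 x 4 grid with free boundary and an arbitrary subset $A$; the graph, the seed, and the variable names are ad hoc choices.

```python
import numpy as np

rng = np.random.default_rng(1)

# Toy graph: 4 x 4 grid with free boundary; vertices are (x, y) pairs.
L = 4
V = [(x, y) for x in range(L) for y in range(L)]
E = [((x, y), (x + 1, y)) for x in range(L - 1) for y in range(L)] + \
    [((x, y), (x, y + 1)) for x in range(L) for y in range(L - 1)]

J = {e: rng.standard_normal() for e in E}          # Gaussian couplings
sigma = {v: rng.choice([-1, 1]) for v in V}        # an arbitrary spin configuration
A = {v for v in V if rng.random() < 0.5}           # an arbitrary vertex subset

def H(J, s):
    # EA Hamiltonian H_J(s) = - sum_{{k,l} in E} J_{kl} s_k s_l
    return -sum(J[(k, l)] * s[k] * s[l] for (k, l) in E)

# Flip the couplings on the boundary of A and the spins inside A.
boundary = {(k, l) for (k, l) in E if (k in A) != (l in A)}
Jflip = {e: (-J[e] if e in boundary else J[e]) for e in E}
sflip = {v: (-sigma[v] if v in A else sigma[v]) for v in V}

# The flip identity H_{J'}(sigma') = H_J(sigma) holds for every configuration.
print(abs(H(Jflip, sflip) - H(J, sigma)) < 1e-12)   # True
```

Because the identity holds configuration by configuration, the flip maps ground states of $J$ to ground states of $J'$, which is exactly how it is used in the proof.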
Lemma 2.1 gives the following key corollary, which says that if $i$ and $j$ are far apart and far away from the boundary, then the Hermite polynomial expansion of $\sigma_i\sigma_j$ consists of only high degree terms.

Corollary 2.2. If $\hat{\phi}(n) \ne 0$, then $|E(n)| \ge \min\{d(i,j),\, d(i,B) + d(j,B)\}$, where $d$ is the graph distance on $G$ and $d(i,B) := \min\{d(i,k) : k \in B\}$ (which is infinity if $B$ is empty).

Proof. Suppose that $\hat{\phi}(n) \ne 0$. Then by Lemma 2.1, $i, j \in V(n)$ and the connected components containing $i$ and $j$ are either the same, or they both touch $B$. In the first case, there is a path of edges in $E(n)$ connecting $i$ to $j$, which implies that $|E(n)| \ge d(i,j)$. In the second case, there is a path in $G(n)$ connecting $i$ to $B$ and another path in $G(n)$ connecting $j$ to $B$, which implies that $|E(n)| \ge d(i,B) + d(j,B)$.

In the following, instead of using the parameter $p$ for the perturbation, we will reparametrize $p$ as $1 - e^{-t}$, where $t \in (0,\infty)$. This is helpful for the following reason. Let
\[
J(t) := e^{-t}J + \sqrt{1 - e^{-2t}}\,J' = (1-p)J + \sqrt{2p - p^2}\,J'.
\]
Then $J(t)$ is the perturbed environment for our first kind of perturbation. It is a standard fact that for any $f \in L^2(J)$, $E(f(J(t))\mid J) = P_t f(J)$, where $(P_t)_{t\ge 0}$ is the Ornstein–Uhlenbeck semigroup (see, e.g., [17, Chapter 2 and Chapter 6]). Moreover, for each $n \in \mathbb{N}^E$, $h_n$ is an eigenfunction of the Ornstein–Uhlenbeck generator, with eigenvalue $|n| := \sum_{e\in E} n_e$. This implies that for any $f \in L^2(J)$,
\[
P_t f(J) = \sum_{n\in\mathbb{N}^E} e^{-|n|t}\, \hat{f}(n)\, h_n(J).
\]
In particular, by the Parseval identity,
\[
E[(E(f(J(t))\mid J))^2] = \|P_t f(J)\|_{L^2}^2 = \sum_{n\in\mathbb{N}^E} e^{-2|n|t}\, \hat{f}(n)^2. \tag{2.4}
\]
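The eigenfunction relation behind (2.4) can be checked in a single coordinate: conditionally on $J$, the first kind of perturbation damps $h_k$ by the factor $e^{-kt}$. The following sketch (a numerical illustration under arbitrary choices of $t$ and of the conditioning value, not part of the proof) verifies this by Monte Carlo.

```python
import numpy as np
from numpy.polynomial import hermite_e as He
from math import factorial

rng = np.random.default_rng(2)

def h(k, x):
    c = np.zeros(k + 1); c[k] = 1.0
    return He.hermeval(x, c) / np.sqrt(factorial(k))   # normalized Hermite polynomial

t = 0.3
J = 1.7                                # condition on one fixed coupling value
Jp = rng.standard_normal(1_000_000)    # independent copy J'
Jt = np.exp(-t) * J + np.sqrt(1 - np.exp(-2 * t)) * Jp   # first kind of perturbation

for k in [1, 2, 3]:
    lhs = np.mean(h(k, Jt))            # Monte Carlo estimate of E[h_k(J(t)) | J]
    rhs = np.exp(-k * t) * h(k, J)     # predicted eigenvalue damping e^{-kt} h_k(J)
    print(k, round(float(lhs), 4), round(float(rhs), 4))
```

Summing the squares of these damped coefficients over all $n$ is exactly what the Parseval identity in (2.4) records.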
Now consider the second kind of perturbation, where each $J_e$ is replaced by an independent copy $J'_e$ with probability $p$. Let us again reparametrize $p = 1 - e^{-t}$. Recall that $h_0 \equiv 1$, and for $n \in \mathbb{N}\setminus\{0\}$, $h_n$ integrates to zero under the standard Gaussian measure on $\mathbb{R}$. This implies that for any $n \in \mathbb{N}^E$,
\[
E(h_n(J(t))\mid J) = (1-p)^{\delta(n)} h_n(J) = e^{-\delta(n)t} h_n(J),
\]
where $\delta(n) := |\{e \in E : n_e > 0\}|$. Therefore, in this case,
\[
E(f(J(t))\mid J) = \sum_{n\in\mathbb{N}^E} e^{-\delta(n)t}\, \hat{f}(n)\, h_n(J)
\]
and hence,
\[
E[(E(f(J(t))\mid J))^2] = \sum_{n\in\mathbb{N}^E} e^{-2\delta(n)t}\, \hat{f}(n)^2. \tag{2.5}
\]
Combining the above observations with Corollary 2.2, we get the following lemma.

Lemma 2.3. Let $\sigma(t)$ be a ground state for the perturbed environment $J(t)$, where the perturbation is either of the two kinds described above. Then for any distinct $i, j \in V^\circ$,
\[
E[(E(\sigma_i(t)\sigma_j(t)\mid J))^2] \le e^{-2t\min\{d(i,j),\, d(i,B)+d(j,B)\}}.
\]
Proof. Since $\delta(n) \le |n|$, (2.4) and (2.5) show that for either kind of perturbation,
\[
E[(E(\sigma_i(t)\sigma_j(t)\mid J))^2] \le \sum_{n\in\mathbb{N}^E} e^{-2\delta(n)t}\, \hat{\phi}(n)^2.
\]
By Corollary 2.2, we know that $\hat{\phi}(n) = 0$ unless $\delta(n) \ge \min\{d(i,j),\, d(i,B)+d(j,B)\}$. Combining this with the above inequality completes the proof.
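Similarly, the factor $(1-p)^{\delta(n)}$ appearing in (2.5) for the second kind of perturbation can be checked coordinatewise. The sketch below (illustrative only; the values of $p$ and of the conditioning couplings are arbitrary) verifies it for $n = (1,1)$, where $h_n(J) = J_1 J_2$ and $\delta(n) = 2$.

```python
import numpy as np

rng = np.random.default_rng(3)

p = 0.3
J = np.array([0.8, -1.4])              # condition on two fixed coupling values
N = 2_000_000

# Second kind of perturbation: each coordinate is independently replaced
# by a fresh standard Gaussian with probability p.
resample = rng.random((N, 2)) < p
Jp = rng.standard_normal((N, 2))
Jt = np.where(resample, Jp, J)

# For n = (1, 1), h_n(J) = J_1 * J_2 and delta(n) = 2, so the predicted
# conditional expectation is (1 - p)^2 * J_1 * J_2.
lhs = np.mean(Jt[:, 0] * Jt[:, 1])
rhs = (1 - p) ** 2 * J[0] * J[1]
print(round(float(lhs), 4), round(float(rhs), 4))
```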
We are now ready to prove Theorem 1.1.

Proof of Theorem 1.1. We will work with the reparametrization $p = 1 - e^{-t}$, and write $R(t)$ instead of $R(p)$. Thus, by Lemma 2.3,
\[
\begin{aligned}
E(R(t)^2) &= \frac{1}{|V^\circ|^2}\sum_{i,j\in V^\circ} E(\sigma_i\sigma_j\sigma_i(t)\sigma_j(t)) = \frac{1}{|V^\circ|^2}\sum_{i,j\in V^\circ} E\bigl(\sigma_i\sigma_j\, E(\sigma_i(t)\sigma_j(t)\mid J)\bigr)\\
&\le \frac{1}{|V^\circ|^2}\sum_{i,j\in V^\circ} E|E(\sigma_i(t)\sigma_j(t)\mid J)| \le \frac{1}{|V^\circ|^2}\sum_{i,j\in V^\circ} \sqrt{E[(E(\sigma_i(t)\sigma_j(t)\mid J))^2]}\\
&\le \frac{1}{|V^\circ|} + \frac{1}{|V^\circ|^2}\sum_{\substack{i,j\in V^\circ,\\ i\ne j}} e^{-t\min\{d(i,j),\, d(i,B)+d(j,B)\}}.
\end{aligned}
\]
For each $k \in \mathbb{N}$, let
\[
N_k := |\{(i,j) : i, j \in V^\circ,\ i \ne j,\ k/t \le \min\{d(i,j),\, d(i,B)+d(j,B)\} \le (k+1)/t\}|.
\]
Then note that
\[
\sum_{\substack{i,j\in V^\circ,\\ i\ne j}} e^{-t\min\{d(i,j),\, d(i,B)+d(j,B)\}} \le \sum_{k=0}^\infty N_k e^{-k}. \tag{2.6}
\]
Since the left side is decreasing in $t$, it is not hard to see that it suffices to prove the theorem under the assumption that $t \in (0,1)$. By the given conditions (and the assumption that $t < 1$), we have that for any $i \in V^\circ$ and $k \in \mathbb{N}$, the number of $j \in V^\circ$ such that $d(i,j) \le (k+1)/t$ is at most $\alpha(k+1)^\beta t^{-\beta}$. Thus, the number of pairs $(i,j)$ such that $d(i,j) \le (k+1)/t$ is at most $\alpha|V^\circ|(k+1)^\beta t^{-\beta}$. Next, note that the number of $i$ such that $d(i,B) \le (k+1)/t$ is at most $|B|\gamma(k+1)^\delta t^{-\delta}$. Thus, the number of pairs $(i,j)$ such that $d(i,B) + d(j,B) \le (k+1)/t$ is at most $|B|^2\gamma^2(k+1)^{2\delta}t^{-2\delta}$. Combining, we get
\[
N_k \le \alpha|V^\circ|(k+1)^\beta t^{-\beta} + |B|^2\gamma^2(k+1)^{2\delta}t^{-2\delta}.
\]
Plugging this bound into (2.6), we get
\[
\sum_{\substack{i,j\in V^\circ,\\ i\ne j}} e^{-t\min\{d(i,j),\, d(i,B)+d(j,B)\}} \le C(|V^\circ|t^{-\beta} + |B|^2 t^{-2\delta}),
\]
where $C$ depends only on $\alpha$, $\beta$, $\gamma$ and $\delta$. Since $t = -\log(1-p)$, there is a constant $C'$ such that $t^{-1} \le C'p^{-1}$ for all $p \in (0, 1 - e^{-1})$. This completes the proof of Theorem 1.1.

2.2 Proof of Theorem 1.3

Let us reparametrize $p = 1 - e^{-t}$, as before. Let $\sigma' := \sigma(t)$, in the notation of Lemma 2.3. Then by Lemma 2.3,
\[
|E(\sigma_i\sigma_j\sigma_i'\sigma_j')| = |E(\sigma_i\sigma_j\, E(\sigma_i'\sigma_j'\mid J))| \le E|E(\sigma_i'\sigma_j'\mid J)| \le \sqrt{E[(E(\sigma_i'\sigma_j'\mid J))^2]} \le e^{-t\min\{d(i,j),\, d(i,B)+d(j,B)\}}.
\]
Since $e^{-t} = 1 - p$, this completes the proof of Theorem 1.3.
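For readers who want to see the kind of decorrelation that the bound displayed above describes, here is a toy simulation (not from the paper; the 3 x 3 grid, the value of $p$, the chosen vertex pairs, and the sample size are all arbitrary). It brute-forces ground states of a tiny EA model with Gaussian couplings and no boundary condition, applies the first kind of perturbation, and compares the estimated $E(\sigma_i\sigma_j\sigma_i'\sigma_j')$ for a nearby pair and a distant pair with $(1-p)^{d(i,j)}$, which is what the displayed bound reduces to when $B$ is empty. With only a few thousand disorder samples the estimates are rough and purely qualitative.

```python
import numpy as np
from itertools import product

rng = np.random.default_rng(4)

# Tiny EA model: 3 x 3 grid, free boundary, no boundary condition (B empty).
L = 3
verts = [(x, y) for x in range(L) for y in range(L)]
idx = {v: k for k, v in enumerate(verts)}
edges = [(idx[(x, y)], idx[(x + 1, y)]) for x in range(L - 1) for y in range(L)] + \
        [(idx[(x, y)], idx[(x, y + 1)]) for x in range(L) for y in range(L - 1)]
a = np.array([e[0] for e in edges])
b = np.array([e[1] for e in edges])

# All 2^9 spin configurations.
S = np.array(list(product([-1, 1], repeat=len(verts))))

def ground_state(J):
    # Brute-force minimizer of H_J(s) = - sum_e J_e s_a s_b.
    energies = -(S[:, a] * S[:, b]) @ J
    return S[np.argmin(energies)]

p = 0.2
n_samples = 2000
pairs = [((0, 0), (0, 1)), ((0, 0), (2, 2))]   # graph distances 1 and 4
corr = np.zeros(len(pairs))

for _ in range(n_samples):
    J = rng.standard_normal(len(edges))
    Jp = rng.standard_normal(len(edges))
    Jpert = (1 - p) * J + np.sqrt(2 * p - p * p) * Jp   # first kind of perturbation
    s, sp = ground_state(J), ground_state(Jpert)
    for m, (u, v) in enumerate(pairs):
        i, j = idx[u], idx[v]
        corr[m] += s[i] * s[j] * sp[i] * sp[j]

corr /= n_samples
for m, (u, v) in enumerate(pairs):
    d = abs(u[0] - v[0]) + abs(u[1] - v[1])
    print(f"pair {u}-{v}: estimate {corr[m]:+.3f}, (1-p)^d = {(1 - p) ** d:.3f}")
```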
2.3 Proof of Theorem 1.4

In the setting of Theorem 1.1, take
\[
p = c\max\{|V^\circ|^{-1/(\beta+1)},\ (|B|/|V^\circ|)^{2/(2\delta+1)}\},
\]
where $c$ is a positive constant, to be chosen later. By Theorem 1.1,
\[
E(R(p)^2) \le \frac{1}{|V^\circ|} + \frac{C(|V^\circ|p^{-\beta} + |B|^2 p^{-2\delta})}{|V^\circ|^2} \le (1+C)c^{-\beta-1}p^{\beta+1}p^{-\beta} + Cc^{-2\delta-1}p^{2\delta+1}p^{-2\delta} \le ((1+C)c^{-\beta-1} + Cc^{-2\delta-1})p.
\]
Thus, we can choose $c$ large enough, depending only on $\alpha$, $\beta$, $\gamma$ and $\delta$, such that
\[
E(R(p)^2) \le p. \tag{2.7}
\]
Notice that to ensure that $p \in (0,1)$, we need $|V^\circ|$ and $|V^\circ|/|B|$ large enough (depending only on $\alpha$, $\beta$, $\gamma$ and $\delta$). In fact, we can ensure that $p \in (0, 1/2)$, and this is what we will assume henceforth. It will be shown at the end of the proof that this assumption about $|V^\circ|$ and $|V^\circ|/|B|$ can be dropped.

Let $J(p)$ be the perturbed environment, with the first kind of perturbation, that is, $J(p) = (1-p)J + \sqrt{2p - p^2}\,J'$, where $J'$ is an independent copy of $J$. Let $\sigma(p)$ be the ground state for the perturbed environment. Let $A$ be the region where $\sigma(p)$ disagrees with $\sigma$. Then note that
\[
|A| = |\{i \in V^\circ : \sigma_i\sigma_i(p) = -1\}| = \tfrac{1}{2}\bigl(|\{i \in V^\circ : \sigma_i\sigma_i(p) = -1\}| + |V^\circ| - |\{i \in V^\circ : \sigma_i\sigma_i(p) = 1\}|\bigr) = \frac{|V^\circ|}{2} - \frac{|V^\circ|R(p)}{2}.
\]
Thus, by (2.7),
\[
P(||A| - |V^\circ|/2| > |V^\circ|/4) = P(|R(p)| > 1/2) \le 4E(R(p)^2) \le 4p. \tag{2.8}
\]
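The identity $|A| = |V^\circ|/2 - |V^\circ|R(p)/2$ used above is a purely combinatorial fact about two spin configurations. A minimal check (illustrative only; the size 100 and the random configurations are arbitrary):

```python
import numpy as np

rng = np.random.default_rng(5)

n = 100                                  # plays the role of |V°|
sigma = rng.choice([-1, 1], size=n)      # plays the role of the ground state sigma
sigma_p = rng.choice([-1, 1], size=n)    # plays the role of sigma(p)

R = np.mean(sigma * sigma_p)             # site overlap
A_size = np.sum(sigma != sigma_p)        # |A|, the number of disagreement sites

# |A| = |V°|/2 - |V°| R / 2  (the two printed numbers agree up to floating-point rounding)
print(A_size, n / 2 - n * R / 2)
```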
Next, note that
\[
\sigma_i(p)\sigma_j(p) =
\begin{cases}
-\sigma_i\sigma_j & \text{if } \{i,j\} \in \partial A,\\
\sigma_i\sigma_j & \text{otherwise.}
\end{cases} \tag{2.9}
\]
Thus,
\[
H_J(\sigma(p)) - H_J(\sigma) = 2\sum_{\{i,j\}\in\partial A} J_{ij}\sigma_i\sigma_j.
\]
On the other hand, since $\sigma(p)$ minimizes $H_{J(p)}$,
\[
H_{J(p)}(\sigma) - H_{J(p)}(\sigma(p)) \ge 0.
\]
But note that by equation (2.9),
\[
\begin{aligned}
H_{J(p)}(\sigma) - H_{J(p)}(\sigma(p)) &= -2\sum_{\{i,j\}\in\partial A} J_{ij}(p)\sigma_i\sigma_j\\
&= -2(1-p)\sum_{\{i,j\}\in\partial A} J_{ij}\sigma_i\sigma_j - 2\sqrt{2p - p^2}\sum_{\{i,j\}\in\partial A} J'_{ij}\sigma_i\sigma_j\\
&= -(1-p)(H_J(\sigma(p)) - H_J(\sigma)) - 2\sqrt{2p - p^2}\sum_{\{i,j\}\in\partial A} J'_{ij}\sigma_i\sigma_j.
\end{aligned}
\]
Combining the last two displays, we get
\[
H_J(\sigma(p)) - H_J(\sigma) \le -\frac{2\sqrt{2p - p^2}}{1-p}\sum_{\{i,j\}\in\partial A} J'_{ij}\sigma_i\sigma_j \le \frac{2\sqrt{2p - p^2}}{1-p}\,|\partial A|\max_{\{i,j\}\in E}|J'_{ij}|.
\]
But $H_J(\sigma(p)) - H_J(\sigma) = \Delta(A)$. Thus,
\[
E\Bigl(\frac{\Delta(A)}{|\partial A|}\Bigr) \le \frac{2\sqrt{2p - p^2}}{1-p}\,E\bigl(\max_{\{i,j\}\in E}|J'_{ij}|\bigr) \le \frac{2\sqrt{2p - p^2}}{1-p}\sqrt{4\log|E|}, \tag{2.10}
\]
where the last inequality follows from a standard fact about Gaussian random variables (see, e.g., [17, Equation (A.3)]), and we interpret the ratio $\Delta(A)/|\partial A|$ to be zero if $A = \emptyset$. Now note that for any $S \subseteq V^\circ$,
\[
\Delta(S) \le 2|\partial S|\max_{e\in E}|J_e|. \tag{2.11}
\]
Combining this with (2.8), (2.10), and the fact that $p < 1/2$, we get
\[
\begin{aligned}
E(F) &\le E\Bigl(\frac{\Delta(A)}{|\partial A|}\Bigr) + 2E\bigl(1_{\{||A|-|V^\circ|/2|>|V^\circ|/4\}}\max_{e\in E}|J_e|\bigr)\\
&\le C\sqrt{p\log|E|} + 2\sqrt{P(||A| - |V^\circ|/2| > |V^\circ|/4)\,E(\max_{e\in E} J_e^2)}\\
&\le C\sqrt{p\log|E|} + C\sqrt{p\,E(\max_{e\in E} J_e^2)},
\end{aligned}
\]
where $C$ depends only on $\alpha$, $\beta$, $\gamma$ and $\delta$. Again, by standard facts about Gaussian random variables, it follows that
\[
E(\max_{e\in E} J_e^2) = 4E\Bigl(\log\max_{e\in E} e^{J_e^2/4}\Bigr) \le 4\log\sum_{e\in E} E(e^{J_e^2/4}) = 4\log|E| + 4\log\sqrt{2} \le 8\log|E|,
\]
where the last inequality holds because $|E| \ge 2$.
This completes the proof of Theorem 1.4 under the assumption that $|V^\circ|$ and $|V^\circ|/|B|$ are larger than some constant depending on $\alpha$, $\beta$, $\gamma$ and $\delta$. To fully complete the proof, let us now show that this assumption can be dropped. Note that by (2.11), $F$ is always bounded above by $2\max_{e\in E}|J_e|$. Thus, we always have that $E(F) \le 2\sqrt{4\log|E|}$. This shows that by sufficiently increasing the constant $C$ in the statement of the theorem, we can have the required inequality hold without the largeness assumption on $|V^\circ|$ and $|V^\circ|/|B|$.

2.4 Proof of Theorem 1.6

Take any edge $e = \{i,j\} \in \partial A$. Let $H_1$ (resp., $H_2$) be the minimum energy of the system subject to the constraints $\sigma_i = \sigma_j$ (resp., $\sigma_i = -\sigma_j$) and $J_e = 0$, keeping all other edge weights intact. Then the ground state energy is $\min\{-J_e + H_1,\ J_e + H_2\}$. Moreover, the ground state satisfies $\sigma_i = \sigma_j$ if $-J_e + H_1 < J_e + H_2$, and $\sigma_i = -\sigma_j$ if $-J_e + H_1 > J_e + H_2$. (Note that these are the only possibilities, since equality occurs with probability zero.) The conditions can be rewritten as $J_e > (H_1 - H_2)/2$ and $J_e < (H_1 - H_2)/2$, respectively. Thus, if we change the value of $J_e$, the ground state does not change as long as the new value is on the same side of $(H_1 - H_2)/2$ as the old one.
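The threshold characterization just described is easy to confirm by brute force on a very small graph. The sketch below (an illustration with an arbitrary 2 x 3 grid and Gaussian couplings, not part of the proof) computes $H_1$ and $H_2$ for one edge, checks that the ground-state energy equals $\min\{-J_e + H_1,\ J_e + H_2\}$, checks that the relative sign at $e$ is decided by which side of $(H_1 - H_2)/2$ the coupling lies on, and verifies that moving $J_e$ within the same side leaves the ground state unchanged.

```python
import numpy as np
from itertools import product

rng = np.random.default_rng(6)

# Small test graph: 2 x 3 grid, free boundary.
verts = [(x, y) for x in range(2) for y in range(3)]
idx = {v: k for k, v in enumerate(verts)}
edges = [(idx[(x, y)], idx[(x + 1, y)]) for x in range(1) for y in range(3)] + \
        [(idx[(x, y)], idx[(x, y + 1)]) for x in range(2) for y in range(2)]
J = {e: rng.standard_normal() for e in edges}

S = [np.array(s) for s in product([-1, 1], repeat=len(verts))]

def energy(J, s):
    return -sum(J[(k, l)] * s[k] * s[l] for (k, l) in edges)

e = edges[0]
i, j = e
J0 = dict(J); J0[e] = 0.0                             # same couplings, but J_e set to zero

H1 = min(energy(J0, s) for s in S if s[i] == s[j])    # constrained minima with J_e = 0
H2 = min(energy(J0, s) for s in S if s[i] == -s[j])

gs = min(S, key=lambda s: energy(J, s))               # unconstrained ground state

# Ground-state energy equals min{-J_e + H1, J_e + H2}, and the relative sign at e
# is decided by which side of (H1 - H2)/2 the coupling J_e falls on.
print(np.isclose(energy(J, gs), min(-J[e] + H1, J[e] + H2)))
print((gs[i] == gs[j]) == (J[e] > (H1 - H2) / 2))

# Moving J_e to another value on the same side of the threshold leaves the
# ground state unchanged (up to a global spin flip).
J2 = dict(J); J2[e] = (H1 - H2) / 2 + (1.0 if J[e] > (H1 - H2) / 2 else -1.0)
gs2 = min(S, key=lambda s: energy(J2, s))
print(np.array_equal(gs2, gs) or np.array_equal(gs2, -gs))
```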
Let us say that two edges are "neighbors" of each other if they share one common endpoint. It is not hard to see that $|H_1 - H_2|$ is at most the sum of $|J_f|$ over all edges $f$ that are neighbors of $e$. Let $S_e$ denote this sum. We will say that $e$ is a "special edge" if $J_e > S_e + 2$. Note that if $e = \{i,j\}$ is special, then $J_e > 0$ and $\sigma_i = \sigma_j$ for the ground state $\sigma$. It is easy to see that one can choose a subset $K \subseteq \partial A$ such that no two edges in $K$ are neighbors of each other or have a common neighbor, and $|K| \ge c|\partial A|$, where $c > 0$ depends only on the maximum degree of $G$. We make two important observations about $K$. First, note that the events $\{J_e > S_e + 2\}$, as $e$ ranges over $K$, are independent. Thus, if $X$ denotes the number of special edges in $K$, then $X$ is a sum of independent Bernoulli random variables. Moreover, it is not hard to see that $E(X) \ge a|K|$ for some constant $a > 0$ depending only on the maximum degree of $G$. Next, we claim that $\Delta(A) \ge 2X$. To see this, note that if we replace $J_e$ by $J_e - 1$ for any special edge $e = \{i,j\} \in K$, then the ground state does not change. But all other special edges in $K$ remain special even after this operation, since no two edges in $K$ are neighbors of each other. Thus, we can repeat this substitution successively for each special edge in $K$, keeping the ground state unchanged. Let $\sigma$ denote the ground state in the environment $J$.
Let $J'$ denote the new environment obtained above, and $\sigma'$ denote the state obtained by overturning all the spins in $A$. Then by the conclusion of the previous paragraph, $H_{J'}(\sigma) \le H_{J'}(\sigma')$. But note that $\sigma_i\sigma_j = 1$ for every special edge $\{i,j\}$. Thus,
\[
H_{J'}(\sigma') - H_{J'}(\sigma) = 2\sum_{\{i,j\}\in\partial A} J'_{ij}\sigma_i\sigma_j = 2\sum_{\{i,j\}\in\partial A} J_{ij}\sigma_i\sigma_j - 2X = \Delta(A) - 2X.
\]
This proves that $\Delta(A) \ge 2X$. By the observations about $X$ made above, it is now easy to complete the proof (e.g., by Hoeffding's concentration inequality).

2.5 Proof of Theorem 1.7

Consider the system perturbed by the second kind of perturbation, with parameter $p$. Let $X$ be the number of edges where $J_e$ is replaced by an independent copy $J'_e$. Then $X$ is a Binomial$(|E|, p)$ random variable. A different way to cause the same perturbation is to first generate $X$ from the Binomial$(|E|, p)$ distribution, and then pick $X$ distinct edges at random and replace the couplings by independent copies. Let $e_1, e_2, \dots, e_X$ denote these edges.
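The equivalence of the two ways of generating the perturbation, independent Bernoulli$(p)$ marks per edge versus a Binomial$(|E|, p)$ count followed by a uniformly random subset of that size, can be checked empirically. A small sketch (illustrative only; $|E| = 10$, $p = 0.3$ and the compared statistics are arbitrary choices):

```python
import numpy as np

rng = np.random.default_rng(7)

m = 10          # number of edges |E| in this toy check
p = 0.3
N = 100_000

# Construction 1: each edge is independently marked for resampling with probability p.
marks1 = rng.random((N, m)) < p

# Construction 2: draw X ~ Binomial(m, p), then mark X distinct edges chosen uniformly at random.
X = rng.binomial(m, p, size=N)
marks2 = np.zeros((N, m), dtype=bool)
for row, x in enumerate(X):
    marks2[row, rng.choice(m, size=x, replace=False)] = True

# The two constructions give the same law for the resampled set; compare a few statistics.
print(marks1.mean(), marks2.mean())                          # both approx p
print((marks1[:, 0] & marks1[:, 1]).mean(),                  # both approx p^2
      (marks2[:, 0] & marks2[:, 1]).mean())
```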
Let $\sigma^0 = \sigma$ be the original ground state, and $\sigma^k$ be the ground state after replacing $J_{e_1}, \dots, J_{e_k}$ by independent copies. Let $\sigma' = \sigma^X$ be the ground state after completing the whole replacement process. Let $R(p)$ denote the site overlap between $\sigma$ and $\sigma'$. Note that
\[
R(p)^2 = \frac{1}{|V^\circ|^2}\sum_{i,j\in V^\circ} \sigma_i\sigma_j\sigma_i'\sigma_j' = \frac{2}{|V^\circ|^2}\sum_{i,j\in V^\circ}\Bigl(\frac{1}{2} - 1_{\{\sigma_i\sigma_j \ne \sigma_i'\sigma_j'\}}\Bigr),
\]
which implies that
\[
E(R(p)^2) = \frac{2}{|V^\circ|^2}\sum_{i,j\in V^\circ}\Bigl(\frac{1}{2} - P(\sigma_i\sigma_j \ne \sigma_i'\sigma_j')\Bigr). \tag{2.12}
\]
Now note that
\[
P(\sigma_i\sigma_j \ne \sigma_i'\sigma_j' \mid X) \le \sum_{k=1}^X P(\sigma_i^{k-1}\sigma_j^{k-1} \ne \sigma_i^k\sigma_j^k \mid X). \tag{2.13}
\]
Let $\tilde{\sigma}$ be the ground state after replacing the weight on one uniformly chosen edge by an independent copy in the original system. Given $X$, $\sigma^{k-1}$ has the same law as $\sigma$ for any $1 \le k \le X$. Given $X$ and $\sigma^{k-1}$, $e_k$ is uniformly distributed on $E$. Thus, given $X$, $(\sigma^{k-1}, \sigma^k)$ has the same distribution as $(\sigma, \tilde{\sigma})$. This shows that for any $1 \le k \le X$,
\[
P(\sigma_i^{k-1}\sigma_j^{k-1} \ne \sigma_i^k\sigma_j^k \mid X) = P(\sigma_i\sigma_j \ne \tilde{\sigma}_i\tilde{\sigma}_j).
\]
Plugging this into (2.13), we get
\[
P(\sigma_i\sigma_j \ne \sigma_i'\sigma_j' \mid X) \le X\,P(\sigma_i\sigma_j \ne \tilde{\sigma}_i\tilde{\sigma}_j).
\]
Taking expectation on both sides gives
\[
P(\sigma_i\sigma_j \ne \sigma_i'\sigma_j') \le |E|\,p\,P(\sigma_i\sigma_j \ne \tilde{\sigma}_i\tilde{\sigma}_j).
\]
Combining this with (2.12), we get
\[
\sum_{i,j\in V^\circ} P(\sigma_i\sigma_j \ne \tilde{\sigma}_i\tilde{\sigma}_j) \ge \frac{1}{|E|p}\sum_{i,j\in V^\circ} P(\sigma_i\sigma_j \ne \sigma_i'\sigma_j') = \frac{|V^\circ|^2}{2|E|p}\bigl(1 - E(R(p)^2)\bigr).
\]
Applying Theorem 1.1 to the right side gives
\[
\sum_{i,j\in V^\circ} P(\sigma_i\sigma_j \ne \tilde{\sigma}_i\tilde{\sigma}_j) \ge \frac{|V^\circ|^2}{2|E|p}\Bigl(1 - \frac{1}{|V^\circ|} - \frac{C(|V^\circ|p^{-\beta} + |B|^2p^{-2\delta})}{|V^\circ|^2}\Bigr).
\]
Choosing $p = c\max\{|V^\circ|^{-1/\beta},\ (|B|/|V^\circ|)^{1/\delta}\}$ for some sufficiently large $c$ (depending only on $\alpha$, $\beta$, $\gamma$ and $\delta$), and assuming that $|V^\circ|$ and $|V^\circ|/|B|$ are sufficiently large (again, depending only on $\alpha$, $\beta$, $\gamma$ and $\delta$), we can ensure that the term within the brackets on the right side above is at least $1/2$. Thus, if $|V^\circ|$ and $|V^\circ|/|B|$ are large enough, then
\[
\sum_{i,j\in V^\circ} P(\sigma_i\sigma_j \ne \tilde{\sigma}_i\tilde{\sigma}_j) \ge \frac{C|V^\circ|^2}{|E|\max\{|V^\circ|^{-1/\beta},\ (|B|/|V^\circ|)^{1/\delta}\}},
\]
for some $C > 0$ that depends only on $\alpha$, $\beta$, $\gamma$ and $\delta$. But the number of pairs $(i,j)$ such that $\sigma_i\sigma_j \ne \tilde{\sigma}_i\tilde{\sigma}_j$ is equal to $(|V^\circ| - |A|)|A|$, where $A$ is the set of sites where $\sigma$ disagrees with $\tilde{\sigma}$ (taking the smaller of two sets if $B = \emptyset$). Thus,
\[
E|A| \ge \frac{1}{|V^\circ|}E[(|V^\circ| - |A|)|A|] = \frac{1}{|V^\circ|}\sum_{i,j\in V^\circ} P(\sigma_i\sigma_j \ne \tilde{\sigma}_i\tilde{\sigma}_j).
\]
Combining with the previous display completes the proof of Theorem 1.7.

References

[1] M. Aizenman and J. Wehr. Rounding of first-order phase transitions in systems with quenched disorder. Physical Review Letters, 62(21):2503, 1989.
[2] M. Aizenman and J. Wehr. Rounding effects of quenched randomness on first-order phase transitions. Communications in Mathematical Physics, 130(3):489–528, 1990.

[3] L.-P. Arguin and M. Damron. On the number of ground states of the Edwards–Anderson spin glass model. Annales de l'IHP Probabilités et Statistiques, 50(1):28–62, 2014.

[4] L.-P. Arguin and J. Hanson. On absence of disorder chaos for spin glasses on $\mathbb{Z}^d$. Electronic Communications in Probability, 25:1–12, 2020.
[5] L.-P. Arguin, M. Damron, C. M. Newman, and D. L. Stein. Uniqueness of ground states for short-range spin glasses in the half-plane. Communications in Mathematical Physics, 300(3):641–657, 2010.

[6] L.-P. Arguin, C. M. Newman, D. L. Stein, and J. Wehr. Fluctuation bounds for interface free energies in spin glasses. Journal of Statistical Physics, 156(2):221–238, 2014.

[7] L.-P. Arguin, C. M. Newman, D. L. Stein, and J. Wehr. Zero-temperature fluctuations in short-range spin glasses. Journal of Statistical Physics, 163(5):1069–1078, 2016.
[8] L.-P. Arguin, C. M. Newman, and D. L. Stein. A relation between disorder chaos and incongruent states in spin glasses on $\mathbb{Z}^d$. Communications in Mathematical Physics, 367(3):1019–1043, 2019.

[9] L.-P. Arguin, C. M. Newman, and D. L. Stein. Ground state stability in two spin glass models. In In and Out of Equilibrium 3: Celebrating Vladas Sidoravicius, pages 17–25. Springer, 2021.
[10] A. Auffinger and W.-K. Chen. Universality of chaos and ultrametricity in mixed p-spin models. Communications on Pure and Applied Mathematics, 69(11):2107–2130, 2016.

[11] G. Ben Arous, E. Subag, and O. Zeitouni. Geometry and temperature chaos in mixed spherical spin glasses at low temperature: the perturbative regime. Communications on Pure and Applied Mathematics, 73(8):1732–1828, 2020.

[12] N. Berger and R. J. Tessler. No percolation in low temperature spin glass. Electronic Journal of Probability, 22:1–19, 2017.

[13] B. Bollobás and I. Leader. Edge-isoperimetric inequalities in the grid. Combinatorica, 11(4):299–314, 1991.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Bray and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Moore.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Critical behavior of the three-dimensional Ising spin glass.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Physical Review B, 31(1):631, 1985.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' 18 [15] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Bray and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Moore.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chaotic nature of the spin-glass phase.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Physical Review Letters, 58(1):57, 1987.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [16] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chatterjee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Disorder chaos and multiple valleys in spin glasses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' arXiv preprint arXiv:0907.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='3381, 2009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [17] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chatterjee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Superconcentration and Related Topics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Springer, Cham, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [18] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-K.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Disorder chaos in the Sherrington–Kirkpatrick model with external field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Annals of Probability, 41(5):3345–3391, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [19] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chaos in the mixed even-spin models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Communications in Mathematical Physics, 328(3):867–901, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [20] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chen and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Panchenko.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Temperature chaos in some spherical mixed p-spin models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Journal of Statistical Physics, 166(5):1151–1162, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [21] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chen and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Panchenko.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Disorder chaos in some diluted spin glass models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Annals of Applied Probability, 28(3):1356–1378, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [22] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-K.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chen and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Sen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Parisi formula, disorder chaos and fluctuation for the ground state energy in the spherical mixed p-spin models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Communications in Mathematical Physics, 350(1):129–173, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [23] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chen, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Hsieh, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Hwang, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Sheu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Disorder chaos in the spherical mean-field model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Journal of Statistical Physics, 160(2):417–429, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [24] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chen, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Dey, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Panchenko.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Fluctuations of the free energy in the mixed p-spin models with external field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Probability Theory and Related Fields, 168(1):41–53, 2017.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [25] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chen, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Handschy, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Lerman.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' On the energy landscape of the mixed even p-spin model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Probability Theory and Related Fields, 171(1):53–95, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [26] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Cotar, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Jahnel, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' K¨ulske.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Extremal decomposition for random Gibbs measures: from general metastates to metastates on extremal random Gibbs measures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Electronic Communications in Probability, 23:1–12, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [27] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Dario, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Harel, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Peled.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Quantitative disorder effects in low-dimensional spin systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' arXiv preprint arXiv:2101.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='01711, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [28] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' F.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Edwards and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Anderson.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Theory of spin glasses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Journal of Physics F: Metal Physics, 5(5):965, 1975.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [29] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Eldan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' A simple approach to chaos for p-spin models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Journal of Statistical Physics, 181(4):1266–1276, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [30] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Fisher and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Huse.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Ordered phase of short-range Ising spin-glasses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Physical Review Letters, 56(15):1601, 1986.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' 19 [31] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Fisher and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Huse.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Equilibrium behavior of the spin-glass ordered phase.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Physi- cal Review B, 38(1):386, 1988.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [32] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Gamarnik.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' The overlap gap property: A topological barrier to optimizing over random structures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Proceedings of the National Academy of Sciences, 118(41):e2108492118, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [33] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Garban and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Steif.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Noise sensitivity of Boolean functions and percolation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Cam- bridge University Press, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [34] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Garey and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Johnson.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Computers and Intractability: A guide to the theory of NP-completeness.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Freeman, San Francisco, CA, 1979.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [35] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Huang and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Sellke.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Tight Lipschitz hardness for optimizing mean field spin glasses.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' arXiv preprint arXiv:2110.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='07847, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [36] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Huse and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Fisher.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Pure states in spin glasses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Journal of Physics A: Mathemat- ical and General, 20(15):L997, 1987.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [37] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Imry and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Ma.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Random-field instability of the ordered state of continuous symme- try.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Physical Review Letters, 35(21):1399, 1975.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [38] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Krzakala and O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Martin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Spin and link overlaps in three-dimensional spin glasses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Physical Review Letters, 85(14):3013, 2000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [39] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' McMillan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Scaling theory of Ising spin glasses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Journal of Physics C: Solid State Physics, 17(18):3179, 1984.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [40] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' M´ezard, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Parisi, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Virasoro.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Spin glass theory and beyond: An Introduc- tion to the Replica Method and Its Applications, volume 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' World Scientific Publishing Company, 1987.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [41] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Newman and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Stein.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Multiple states and thermodynamic limits in short-ranged Ising spin-glass models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Physical Review B, 46(2):973, 1992.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [42] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Newman and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Stein.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Thermodynamic chaos and the structure of short-range spin glasses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' In Mathematical aspects of spin glasses and neural networks, pages 243–287.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Springer, 1998.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [43] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Newman and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Stein.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Are there incongruent ground states in 2d Edwards– Anderson spin glasses?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Communications in Mathematical Physics, 224(1):205–218, 2001.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [44] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Newman and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Stein.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Ordering and broken symmetry in short-ranged spin glasses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Journal of Physics: Condensed Matter, 15(32):R1319–R1364, 2003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [45] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Palassini and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Young.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Nature of the spin glass state.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Physical Review Letters, 85 (14):3017, 2000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [46] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Panchenko.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' The Sherrington–Kirkpatrick model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Springer Science & Business Media, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' 20 [47] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Panchenko.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Chaos in temperature in generic 2p-spin models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Communications in Math- ematical Physics, 346(2):703–739, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [48] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Parisi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Mean field theory of spin glasses: Statics and dynamics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' In Complex Systems, volume 85 of Les Houches, pages 131–178.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Elsevier, 2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [49] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Sherrington and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Kirkpatrick.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Solvable model of a spin-glass.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Physical Review Letters, 35(26):1792, 1975.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [50] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Shih, S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Wu, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content='-S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Kuo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Unifying maximum cut and minimum cut of a planar graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' IEEE Transactions on Computers, 39(5):694–697, 1990.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [51] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Stein.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Frustration and fluctuations in systems with quenched disorder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' In Pwa90: A Lifetime of Emergence, pages 169–186.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' World Scientific, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [52] E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Subag.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' The geometry of the Gibbs measure of pure spherical spin glasses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Inventiones Mathematicae, 210(1):135–209, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [53] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Talagrand.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Mean field models for spin glasses: Volume I: Basic examples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Springer Science & Business Media, 2010.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' [54] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/e9E2T4oBgHgl3EQfxgi1/content/2301.04112v1.pdf'} +page_content=' Talagrand.' 
diff --git a/etE0T4oBgHgl3EQf5wIr/vector_store/index.faiss b/etE0T4oBgHgl3EQf5wIr/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..9369fd9cb8812b9824a0241c8fce8d4379fe59fa
--- /dev/null
+++ b/etE0T4oBgHgl3EQf5wIr/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:179b3c1f726d7fa0b532447f1700200a8ebecc2aa33bb36197232633d5e38c70
+size 5898285
diff --git a/etFJT4oBgHgl3EQfTSwk/vector_store/index.pkl b/etFJT4oBgHgl3EQfTSwk/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..a976ec7aaebac78bb85475f59566894e2a3ec8e3
--- /dev/null
+++ b/etFJT4oBgHgl3EQfTSwk/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62c8e8fa4595ab60d6444c19c5ee93f8dcc9c3173fdd8b6f67b8d3de8cf6419b
+size 127372
diff --git a/gdE0T4oBgHgl3EQf6QJf/content/2301.02761v1.pdf b/gdE0T4oBgHgl3EQf6QJf/content/2301.02761v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..c48443aa7e7daa81c6540c066cdc6dcc02e6eb71
--- /dev/null
+++ b/gdE0T4oBgHgl3EQf6QJf/content/2301.02761v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7e1863f5ffb8e1b28320e6bddca9bba65fe9702128cdbaa0b42d07932696565
+size 3418200
diff --git a/gdE0T4oBgHgl3EQf6QJf/vector_store/index.faiss b/gdE0T4oBgHgl3EQf6QJf/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..76011781a133d8049eaeed9ac1dda3f59ff0486a
--- /dev/null
+++ b/gdE0T4oBgHgl3EQf6QJf/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:045884c5483b4f11e9479dd4db4a2a7f05b0b56820a3a3efbd7e6f8ac3b64210
+size 2883629
diff --git a/gdE0T4oBgHgl3EQf6QJf/vector_store/index.pkl b/gdE0T4oBgHgl3EQf6QJf/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..d5ae0d7a5bc1343b1863bf45aaa46aee12a6b71a
--- /dev/null
+++ b/gdE0T4oBgHgl3EQf6QJf/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c3e67b209de17645d4d1b8134b5700c3db54b89b6867d3968bc1830241f2c3e
+size 117188
diff --git a/hdFLT4oBgHgl3EQfaC_d/content/tmp_files/2301.12072v1.pdf.txt b/hdFLT4oBgHgl3EQfaC_d/content/tmp_files/2301.12072v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..782ebec7f5d3afc750bb4995e37ce7d071c6a0b3
--- /dev/null
+++ b/hdFLT4oBgHgl3EQfaC_d/content/tmp_files/2301.12072v1.pdf.txt
@@ -0,0 +1,1474 @@
arXiv:2301.12072v1 [q-fin.CP] 28 Jan 2023

Unbiased estimators for the Heston model with stochastic interest rates

Chao Zheng* and Jiangtao Pan
School of Data Sciences, Zhejiang University of Finance and Economics, Hangzhou, China
Abstract

We combine the unbiased estimators in Rhee and Glynn (Operations Research, 63(5), 1026-1043, 2015) with the Heston model with stochastic interest rates. Specifically, we first develop a semi-exact log-Euler scheme for the Heston model with stochastic interest rates, and then, under mild assumptions, we show that the convergence rate in the $L^2$ norm is $O(h)$, where $h$ is the step size. The result applies to a large class of models, such as the Heston-Hull-White model, the Heston-CIR model and the Heston-Black-Karasinski model. Numerical experiments confirm our theoretical convergence rate.

Keywords: Heston model, stochastic interest rates, unbiased estimators, convergence rate, Heston-Hull-White model, Heston-CIR model

AMS subject classifications (2000): 60H35, 65C30, 91G60

* Email: chao.zheng12@gmail.com. This research is supported by National Natural Science Foundation of China (No. 11801504).

1 Introduction

The classical Heston model (Heston [11]) is one of the fundamental models in finance, and it has been widely applied in various financial markets, such as the equity, the fixed income and the foreign exchange markets, due to its tractability in modelling the term structure of implied volatility. However, in this model the interest rate is constant, and this assumption is often not appropriate for long-maturity options, because the long-term behaviour of the interest rate is typically far from being constant. This phenomenon has been empirically investigated by Bakshi, Cao and Chen [2]. A natural extension of the Heston model is to add a stochastic interest rate, which is usually referred to as the Heston model with stochastic interest rates. There are several contributions in this direction, such as Grzelak and Oosterlee [10], Van Haastrecht and Pelsser [18] and references therein.

Under the Heston model with stochastic interest rates, the price of an option can be written as
\[ E\Big[ e^{-\int_0^T r_t\,dt}\, P(S) \Big], \]
where $S$ is the solution to the Heston model with stochastic interest rates and $r_t$ is the underlying interest rate. Here, $P$ denotes the payoff functional of the option. We are interested in calculating this expectation, as for the majority of options there are no closed-form formulas. A classical approach is to use a Monte Carlo method associated with a time-discrete scheme on $S$ to obtain an approximate value. Recently, Rhee and Glynn [17] proposed several unbiased Monte Carlo estimators based on a randomization idea for stochastic differential equations, which are unbiased versions of the multilevel Monte Carlo estimators of Giles [8]. These unbiased estimators have a clear advantage over the standard Monte Carlo estimator, as the latter is typically biased when $S$ has to be approximated through a time-discrete scheme. To combine Rhee and Glynn's estimators with the Heston model with stochastic interest rates, it is essential to develop a numerical scheme with a good convergence rate in the $L^2$ norm. However, the standard theorems, such as those in Kloeden and Platen [13], require that the drift and diffusion coefficients satisfy global Lipschitz and linear growth assumptions, which are not satisfied by the Heston model with stochastic interest rates. Research on developing time-discrete schemes for the Heston model with stochastic interest rates is scarce. Cozma, Mariapragassam and Reisinger [6] proposed a different log-Euler scheme for the stochastic-local volatility model with stochastic rates, which includes the model we consider, and they demonstrated strong convergence without providing a rate.
This is the only reference we are aware of concerning the convergence of Monte Carlo algorithms for the Heston model with stochastic interest rates.

In this article, we develop a semi-exact log-Euler scheme for the Heston model with stochastic interest rates, where the driving Brownian motion of the interest rate model is independent of the driving Brownian motions of the Heston component. The scheme is an extension of those in Mickel and Neuenkirch [14] and Zheng [19] for the classical Heston model. Under mild assumptions on the interest rate model, and assuming that the payoff of the option is Lipschitz continuous and bounded, we show that the underlying scheme converges with order one in the $L^2$ norm. There are two advantages of the scheme we develop. One is that the convergence rate is free of Feller's index, i.e., the convergence rate is valid for the full range of parameters in the Heston component of the Heston model with stochastic interest rates. The other is that the convergence rate is higher than the usual convergence rate (one-half in the $L^2$ norm) of the standard Euler scheme under standard assumptions. When a numerical scheme has a convergence rate higher than one-half, it is convenient to combine it with Rhee and Glynn's unbiased estimators. Our result applies to a large class of models, including the Heston-Hull-White model, the Heston-CIR model and the Heston-Black-Karasinski model, among which the Heston-Hull-White model and the Heston-CIR model are particularly attractive in practical applications. We refer readers to Grzelak and Oosterlee [10] for more discussions.

The remainder of the article is organized as follows. In Section 2, we review the Heston model with stochastic interest rates and develop a log-Euler scheme for it. Section 3 reviews the unbiased estimators from Rhee and Glynn [17]. In Section 4, we derive the relevant convergence rate under several mild assumptions. Section 5 applies the result to several classical interest rate models, and Section 6 illustrates numerical results to support our theoretical analysis.

2 Heston model with stochastic interest rates

Let $(\Omega, \mathcal{F}, (\mathcal{F}_t)_{t\ge 0}, \mathbb{P})$ be a filtered probability space satisfying the usual assumptions. The Heston model with stochastic interest rates is of the form
\[ dS_t = r_t S_t\,dt + \sqrt{V_t}\,S_t\big(\rho\,dW^1_t + \sqrt{1-\rho^2}\,dW^2_t\big), \]
\[ dV_t = k(\theta - V_t)\,dt + \sigma\sqrt{V_t}\,dW^1_t, \]
where $(W^1_t)_{t\ge 0}$ and $(W^2_t)_{t\ge 0}$ are two independent $\mathcal{F}_t$-adapted Brownian motions and the parameters satisfy $k, \theta, \sigma > 0$ and $\rho \in [-1, 1]$. Here, $(r_t)_{t\ge 0}$ is a stochastic interest rate. The classical interest rate models and their generalizations can be found in Brigo and Mercurio [4]. Among them, a large class of interest rate models can be written as
\[ dr_t = \mu(t, r_t)\,dt + \phi(t, r_t)\,dW^3_t, \]
where $\mu, \phi : [0,T] \times \mathbb{R} \to \mathbb{R}$ are continuous functions and $(W^3_t)_{t\ge 0}$ is an $\mathcal{F}_t$-adapted Brownian motion. We assume that $(W^3_t)_{t\ge 0}$ is independent of $(W^1_t)_{t\ge 0}$ and $(W^2_t)_{t\ge 0}$. Furthermore, we assume that there is a unique solution to the equation for $r_t$ above.

Let $X_t = \ln(S_t)$. By using Itô's lemma, we have
\[ dX_t = \Big( r_t - \tfrac{1}{2}V_t \Big) dt + \sqrt{V_t}\big(\rho\,dW^1_t + \sqrt{1-\rho^2}\,dW^2_t\big). \]
Then, substituting the equation for $V_t$ into the equation above, we obtain
\[ dX_t = \Big[ \Big( r_t - \frac{k\rho\theta}{\sigma} \Big) + \Big( \frac{k\rho}{\sigma} - \frac{1}{2} \Big) V_t \Big] dt + \frac{\rho}{\sigma}\,dV_t + \sqrt{1-\rho^2}\,\sqrt{V_t}\,dW^2_t. \]
Since $(V_t)_{t\ge 0}$ is independent of $(W^2_t)_{t\ge 0}$, the stochastic integral $\int_0^T \sqrt{V_t}\,dW^2_t$ is normally distributed with mean $0$ and variance $\int_0^T V_t\,dt$.
Therefore, the solution at any finite time horizon $T > 0$ can be written as
\[ X_T = X_0 + \int_0^T r_t\,dt + \Big(\frac{\rho k}{\sigma} - \frac{1}{2}\Big)\int_0^T V_t\,dt + \frac{\rho}{\sigma}\big(V_T - V_0 - k\theta T\big) + \sqrt{1-\rho^2}\,\sqrt{\int_0^T V_t\,dt}\;N, \qquad (1) \]
where $N$ is a standard normal random variable that is independent of $(V_t)_{t\in[0,T]}$. Note that $N$ is also independent of $(r_t)_{t\in[0,T]}$, because the driving Brownian motion $(W^3_t)_{t\in[0,T]}$ of $(r_t)_{t\in[0,T]}$ is independent of $(W^2_t)_{t\in[0,T]}$ and $(V_t)_{t\in[0,T]}$. We see that there are several integrals in equation (1) to be approximated.

It is known that $V_t$ follows a scaled noncentral chi-squared distribution given $V_u$ for any $u \in [0, t)$, i.e.,
\[ V_t \overset{d}{=} \frac{\sigma^2(1 - e^{-k(t-u)})}{4k}\,\chi^2_d\Big( \frac{4k e^{-k(t-u)}}{\sigma^2(1 - e^{-k(t-u)})}\,V_u \Big), \]
where $\chi^2_d(\lambda)$ denotes a noncentral chi-squared random variable with degrees of freedom $d = 4k\theta/\sigma^2 > 0$ and noncentrality parameter $\lambda > 0$ (see Glasserman [9]). Hence, $V_t$ can be sampled exactly. Let $(\hat r_{ih})_{i=1,..,T/h}$ be an approximate path of $(r_t)_{t\in[0,T]}$. It is convenient to approximate
\[ \int_0^T r_t\,dt \approx \sum_{i=0}^{T/h-1} \hat r_{ih}\,h, \qquad \int_0^T V_t\,dt \approx \sum_{i=0}^{T/h-1} V_{ih}\,h, \]
using the Euler scheme based on the step size $h$. We denote by $\hat X^h_T$ the approximate solution of $X_T$. Let $\hat S^h_T := e^{\hat X^h_T}$, so that $\hat S^h_T$ is an approximation of $S_T$.
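To illustrate the construction, the following minimal Python sketch simulates one draw of $(\hat S^h_T, \sum_i \hat r_{ih} h)$: the variance is sampled exactly through the noncentral chi-squared transition above, while the interest-rate path is produced by a user-supplied routine sample_rate_path, since the choice of rate model is deferred to Section 5. The routine names are illustrative only and not part of the scheme's definition.

import numpy as np
from scipy.stats import ncx2

def sample_V_path(V0, k, theta, sigma, T, n_steps, rng):
    # Exact simulation of V on the grid ih, i = 0, ..., n_steps, via the
    # scaled noncentral chi-squared transition of the CIR variance process.
    h = T / n_steps
    d = 4.0 * k * theta / sigma**2                      # degrees of freedom
    c = sigma**2 * (1.0 - np.exp(-k * h)) / (4.0 * k)   # scale factor
    V = np.empty(n_steps + 1)
    V[0] = V0
    for i in range(n_steps):
        lam = V[i] * np.exp(-k * h) / c                 # noncentrality parameter
        V[i + 1] = c * ncx2.rvs(d, lam, random_state=rng)
    return V

def log_euler_path(S0, V0, k, theta, sigma, rho, T, n_steps, sample_rate_path, rng):
    # One draw of (hat S^h_T, sum_i hat r_{ih} h) for the semi-exact log-Euler scheme.
    h = T / n_steps
    V = sample_V_path(V0, k, theta, sigma, T, n_steps, rng)
    r_hat = sample_rate_path(T, n_steps, rng)           # user-supplied (hat r_{ih})_{i=0,...,n_steps-1}
    int_r = np.sum(r_hat[:n_steps]) * h                 # Euler sum for int_0^T r_t dt
    int_V = np.sum(V[:n_steps]) * h                     # Euler sum for int_0^T V_t dt
    N = rng.standard_normal()                           # independent standard normal in equation (1)
    X_T = (np.log(S0) + int_r + (rho * k / sigma - 0.5) * int_V
           + (rho / sigma) * (V[-1] - V0 - k * theta * T)
           + np.sqrt((1.0 - rho**2) * int_V) * N)
    return np.exp(X_T), int_r

Applying a payoff and the discount factor $e^{-\sum_i \hat r_{ih} h}$ to the output of this routine with $h = T/2^n$ produces the quantity $Y_n$ used in the next section.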
3 Unbiased estimators for SDEs

In this section, we review the unbiased estimators introduced in Rhee and Glynn [17]. The prices of many options can be expressed as
\[ E(Y) := E\Big[ e^{-\int_0^T r_t\,dt}\, P(S_T) \Big], \]
where $P$ is the payoff functional and $Y \in L^2$ (i.e., $E(Y^2) < \infty$). To estimate this expectation, Rhee and Glynn [17] proposed the estimator
\[ Z = \sum_{n=0}^{N} \Delta_n / P(N \ge n), \]
where $\Delta_n = Y_n - Y_{n-1}$ and $Y_n$, $n \in \mathbb{N}$, is an approximation of $Y$ with step size $T/2^n$ and $Y_{-1} = 0$. In this article, we let
\[ Y_n = e^{-\sum_{i=0}^{T/h-1} \hat r_{ih} h}\, P(\hat S^h_T), \qquad h = T/2^n. \]
Here, $N$ is a nonnegative integer-valued random variable that is independent of $Y_n$. This estimator is usually referred to as the coupled sum estimator. There are other unbiased estimators constructed in a similar way (see Rhee and Glynn [17]).

Suppose that $Y_n$ converges to $Y$ in the $L^2$ norm as $n \to \infty$. Theorem 1 of Rhee and Glynn [17] showed that if
\[ \sum_{n=1}^{\infty} \frac{E\big[(Y_{n-1} - Y)^2\big]}{P(N \ge n)} < \infty, \qquad (2) \]
then $Z$ is an unbiased estimator of $E(Y)$ (i.e., $E(Z) = E(Y)$) with finite variance. Furthermore, the average computational time of $Z$ is proportional to
\[ \sum_{n=0}^{\infty} 2^n\, P(N \ge n). \qquad (3) \]
Therefore, if $E[(Y_n - Y)^2] = O(2^{-2np}) = O(h^{2p})$ with $p > 1/2$ (here, $p$ is the convergence rate in the $L^2$ norm), we can easily construct a distribution for $N$ such that $P(N \ge n) = O(2^{-n(p+1/2)})$ to ensure that (2) and (3) are finite. The optimal distribution of $N$ can be calculated by minimizing the product of the variance and the average computational time of $Z$; see Rhee and Glynn [17] for more discussions. Hence, it is important to investigate the convergence rate of $E[(Y_n - Y)^2]$.
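For illustration, a single draw of the coupled sum estimator can be sketched in Python as follows. The sketch assumes a user-supplied routine simulate_levels(N, rng) that returns the coupled approximations $Y_0, \ldots, Y_N$ computed from one common set of driving paths, successively refined over the step sizes $T/2^0, \ldots, T/2^N$, and it takes $P(N \ge n) = 2^{-1.5 n}$, which matches the choice $P(N \ge n) = O(2^{-n(p+1/2)})$ with $p = 1$; the routine name is an illustrative placeholder.

import numpy as np

def coupled_sum_estimator(simulate_levels, rng, tail=2.0 ** (-1.5)):
    # One draw of Z = sum_{n=0}^{N} (Y_n - Y_{n-1}) / P(N >= n), with Y_{-1} = 0
    # and P(N >= n) = tail**n, i.e. a geometric randomization level N.
    # simulate_levels(N, rng) must return [Y_0, ..., Y_N] built from one common
    # underlying path (the coupling is essential for the variance bound (2)).
    N = 0
    while rng.random() < tail:        # P(N >= n+1 | N >= n) = tail
        N += 1
    Y = simulate_levels(N, rng)
    Z, Y_prev = 0.0, 0.0
    for n in range(N + 1):
        Z += (Y[n] - Y_prev) / tail ** n   # divide by P(N >= n) = tail**n
        Y_prev = Y[n]
    return Z

# The option price E(Y) is then estimated by averaging independent draws of Z, e.g.
# price = np.mean([coupled_sum_estimator(simulate_levels, rng) for _ in range(M)]).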
4 Convergence analysis

Recall that $(r_t)_{t\in[0,T]}$ follows the stochastic differential equation
\[ dr_t = \mu(t, r_t)\,dt + \phi(t, r_t)\,dW^3_t, \]
where $(W^3_t)_{t\in[0,T]}$ is independent of $(W^1_t)_{t\in[0,T]}$ and $(W^2_t)_{t\in[0,T]}$. Let $c$ and $c_n$ denote generic constants whose values may change from line to line, where $c_n$ depends on $n \in \mathbb{N}$. Our analysis throughout this article is based on the following assumption.

Assumption 1. For any $n \in \mathbb{N}$, it holds that
\[ \sup_{t\in[0,T]} E\big[(\mu(t, r_t))^{2n}\big] < \infty, \qquad \sup_{t\in[0,T]} E\big[(\phi(t, r_t))^{2n}\big] < \infty, \]
and the approximate interest rate $(\hat r_{ih})_{i=1,..,T/h}$ satisfies
\[ \max_{i=1,..,T/h} E\big[(\hat r_{ih} - r_{ih})^{2n}\big] < c_n h^{2n}. \]

Remark 1. A typical time-discrete scheme that may satisfy Assumption 1 is the Milstein scheme. Under some standard assumptions on the model coefficients (e.g., Lipschitz continuity and linear growth), its convergence rate in the $L^2$ norm is one.

Theorem 4.1. Under Assumption 1, we have
\[ E\big[(X_T - \hat X^h_T)^{2n}\big] = O(h^{2n}), \qquad \forall n \in \mathbb{N}^+. \]

Proof. A straightforward calculation verifies that
\[ E\big[(X_T - \hat X^h_T)^{2n}\big] \le c_n E\Big[\Big(\int_0^T r_t\,dt - \sum_{i=0}^{T/h-1} \hat r_{ih} h\Big)^{2n}\Big] + c_n E\Big[\Big(\int_0^T V_t\,dt - \sum_{i=0}^{T/h-1} V_{ih} h\Big)^{2n}\Big] + c_n E\Big[\Big(\sqrt{\int_0^T V_t\,dt} - \sqrt{\sum_{i=0}^{T/h-1} V_{ih} h}\,\Big)^{2n}\Big], \qquad (4) \]
where the Cauchy-Schwarz inequality implies that
\[ E\Big[\Big(\sqrt{\int_0^T V_t\,dt} - \sqrt{\sum_{i=0}^{T/h-1} V_{ih} h}\,\Big)^{2n}\Big] = E\Bigg[\Bigg(\frac{\int_0^T V_t\,dt - \sum_{i=0}^{T/h-1} V_{ih} h}{\sqrt{\int_0^T V_t\,dt} + \sqrt{\sum_{i=0}^{T/h-1} V_{ih} h}}\Bigg)^{2n}\Bigg] \le \sqrt{E\Big[\Big(\int_0^T V_t\,dt - \sum_{i=0}^{T/h-1} V_{ih} h\Big)^{4n}\Big]} \cdot \sqrt{E\Big[\Big(\frac{1}{\int_0^T V_t\,dt}\Big)^{2n}\Big]}. \qquad (5) \]
From Theorem 4.1(a) in Dufresne [7], we learn that
\[ E\Big[\Big(\frac{1}{\int_0^T V_t\,dt}\Big)^{2n}\Big] < \infty \]
for any $n \in \mathbb{N}$. Thus, it suffices to investigate the two quantities
\[ E\Big[\Big(\int_0^T V_t\,dt - \sum_{i=0}^{T/h-1} V_{ih} h\Big)^{2n}\Big], \qquad E\Big[\Big(\int_0^T r_t\,dt - \sum_{i=0}^{T/h-1} \hat r_{ih} h\Big)^{2n}\Big]. \]
Let us focus on the quantity involving $r_t$, because the analysis of the quantity involving $V_t$ is similar. It holds that
\[ E\Big[\Big(\int_0^T r_t\,dt - \sum_{i=0}^{T/h-1} \hat r_{ih} h\Big)^{2n}\Big] \le c_n E\Big[\Big(\int_0^T r_t\,dt - \sum_{i=0}^{T/h-1} r_{ih} h\Big)^{2n}\Big] + c_n E\Big[\Big(\sum_{i=0}^{T/h-1} (\hat r_{ih} - r_{ih}) h\Big)^{2n}\Big]. \qquad (6) \]
Let $\eta(t) := \max\{lh : lh \le t,\ l = 0, 1, 2, \ldots\}$. For the first term of (6), we have
\[ E\Big[\Big(\int_0^T r_t\,dt - \sum_{i=0}^{T/h-1} r_{ih} h\Big)^{2n}\Big] = E\Big[\Big(\int_0^T r_{\eta(t)}\,dt - \int_0^T r_t\,dt\Big)^{2n}\Big] \le c_n E\Big[\Big(\int_0^T \Big(\int_{\eta(t)}^t \mu(u, r_u)\,du\Big) dt\Big)^{2n}\Big] + c_n E\Big[\Big(\int_0^T \Big(\int_{\eta(t)}^t \phi(u, r_u)\,dW^3_u\Big) dt\Big)^{2n}\Big]. \qquad (7) \]
An application of the Fubini theorem yields
\[ \int_0^T \Big(\int_{\eta(t)}^t \mu(u, r_u)\,du\Big) dt = \int_0^T \Big(\int_u^{\eta(u)+h} \mu(u, r_u)\,dt\Big) du = h \int_0^T \Big(1 + \frac{\eta(u) - u}{h}\Big) \mu(u, r_u)\,du. \]
Hence, with this substitution, it follows from Jensen's inequality that
\[ E\Big[\Big(\int_0^T \Big(\int_{\eta(t)}^t \mu(u, r_u)\,du\Big) dt\Big)^{2n}\Big] \le c_n h^{2n} \int_0^T E\big[\mu^{2n}(u, r_u)\big]\,du = O(h^{2n}). \qquad (8) \]
Furthermore, we obtain from the stochastic Fubini theorem (see Theorem 65 in Protter [16]) and the Burkholder-Davis-Gundy inequality that
\[ E\Big[\Big(\int_0^T \Big(\int_{\eta(t)}^t \phi(u, r_u)\,dW^3_u\Big) dt\Big)^{2n}\Big] = h^{2n} E\Big[\Big(\int_0^T \Big(1 + \frac{\eta(u) - u}{h}\Big) \phi(u, r_u)\,dW^3_u\Big)^{2n}\Big] \le c_n h^{2n} E\Big[\Big(\int_0^T \phi^2(u, r_u)\,du\Big)^{n}\Big] \le c_n h^{2n} \int_0^T E\big[\phi^{2n}(u, r_u)\big]\,du = O(h^{2n}). \qquad (9) \]
Therefore, by using (8) and (9), we see that (7) is $O(h^{2n})$. For the second term of (6), by Jensen's inequality we have
\[ E\Big[\Big(\sum_{i=0}^{T/h-1} (\hat r_{ih} - r_{ih}) h\Big)^{2n}\Big] \le c_n \sum_{i=0}^{T/h-1} E\big[(\hat r_{ih} - r_{ih})^{2n}\big]\,h = O(h^{2n}). \qquad (10) \]
Thus, combining (7) and (10) into (6), we conclude that $E\big[(\int_0^T r_t\,dt - \sum_{i=0}^{T/h-1} \hat r_{ih} h)^{2n}\big] = O(h^{2n})$. It can be proven analogously that $E\big[(\int_0^T V_t\,dt - \sum_{i=0}^{T/h-1} V_{ih} h)^{2n}\big] = O(h^{2n})$. Finally, we substitute (5) and the two $O(h^{2n})$ terms above into (4) to complete the proof.

We now proceed to the convergence rate of the underlying log-Euler scheme for approximating the price of an option. This requires us to impose a boundedness assumption on the option payoff.

Assumption 2. The payoff $P : [0, +\infty) \to \mathbb{R}$ is Lipschitz continuous and there exists a constant $C > 0$ such that $P(U) = P(C)$ for all $U > C$.

Under Assumption 2, it holds that
\[ |P(U_1) - P(U_2)| \le c\,|\ln U_1 - \ln U_2| \qquad (11) \]
for all $U_1, U_2 \in [0, +\infty)$; see Theorem 3.1 in Zheng [19]. A payoff that satisfies Assumption 2 is bounded, which is typically suitable for a put-style option. A put-style option becomes worthless when the price of the underlying asset $S_T$ is sufficiently high. For example, the standard European put option has the payoff $P(S_T) := \max\{K - S_T, 0\}$ with $K > 0$, which satisfies Assumption 2.

Theorem 4.2. Suppose that Assumptions 1 and 2 are satisfied, and that $(r_t)_{t\in[0,T]}$ and its approximation $(\hat r_{ih})_{i=1,2,..,T/h}$ are nonnegative. Then, we have
\[ E\Big[\big(e^{-\int_0^T r_t dt} P(S_T)\big)^2\Big] < \infty \]
and
\[ E\Big[\big(e^{-\int_0^T r_t dt} P(S_T) - e^{-\sum_{i=0}^{T/h-1} \hat r_{ih} h} P(\hat S^h_T)\big)^2\Big] = O(h^2). \]

Proof. For the first claim, since $(r_t)_{t\in[0,T]}$ is nonnegative and Assumption 2 implies that $P$ is bounded, it is trivial that $E\big[(e^{-\int_0^T r_t dt} P(S_T))^2\big] < \infty$. For the second claim, we have
\[ E\Big[\big(e^{-\int_0^T r_t dt} P(S_T) - e^{-\sum_i \hat r_{ih} h} P(\hat S^h_T)\big)^2\Big] = E\Big[\Big(e^{-\int_0^T r_t dt}\big(P(S_T) - P(\hat S^h_T)\big) + P(\hat S^h_T)\big(e^{-\int_0^T r_t dt} - e^{-\sum_i \hat r_{ih} h}\big)\Big)^2\Big] \le 2 E\Big[e^{-2\int_0^T r_t dt}\big(P(S_T) - P(\hat S^h_T)\big)^2\Big] + 2 E\Big[P^2(\hat S^h_T)\big(e^{-\int_0^T r_t dt} - e^{-\sum_i \hat r_{ih} h}\big)^2\Big]. \qquad (12) \]
As it holds that $|e^{-x} - e^{-y}| \le |x - y|$ for any $x, y \ge 0$, and the processes $(r_t)_{t\in[0,T]}$ and $(\hat r_{ih})_{i=1,2,..,T/h}$ are nonnegative, we obtain
\[ E\Big[P^2(\hat S^h_T)\big(e^{-\int_0^T r_t dt} - e^{-\sum_i \hat r_{ih} h}\big)^2\Big] \le c\,E\Big[\Big(\int_0^T r_t\,dt - \sum_{i=0}^{T/h-1} \hat r_{ih} h\Big)^2\Big] = O(h^2). \qquad (13) \]
The right-hand side of (13) is $O(h^2)$; see the proof of Theorem 4.1. By using the Cauchy-Schwarz inequality, inequality (11) and Theorem 4.1, we have
\[ E\Big[e^{-2\int_0^T r_t dt}\big(P(S_T) - P(\hat S^h_T)\big)^2\Big] \le \sqrt{E\big[e^{-4\int_0^T r_t dt}\big]} \cdot \sqrt{E\big[\big(P(S_T) - P(\hat S^h_T)\big)^4\big]} \le c\,\sqrt{E\big[\big(\ln(S_T) - \ln(\hat S^h_T)\big)^4\big]} = O(h^2), \qquad (14) \]
where $E\big[e^{-4\int_0^T r_t dt}\big] < \infty$ since $r_t$ is nonnegative. With (13) and (14) substituted into (12), the proof is complete.

We emphasize that Theorem 4.2 is for nonnegative interest rate processes, i.e., $P(r_t \ge 0,\ \forall t \in [0,T]) = 1$, which holds for a large class of interest rate models. However, if $r_t$ can be negative, for example when $r_t$ follows the Hull-White model, then inequality (13) may not be satisfied. To address this problem, we establish Theorem 4.3 below.

Theorem 4.3. Suppose that Assumptions 1 and 2 are satisfied, and that $E\big[e^{-4\int_0^T r_t dt}\big] < \infty$ and $E\big[e^{-4\sum_{i=0}^{T/h-1} \hat r_{ih} h}\big] < \infty$. Then, we have
\[ E\Big[\big(e^{-\int_0^T r_t dt} P(S_T)\big)^2\Big] < \infty \]
and
\[ E\Big[\big(e^{-\int_0^T r_t dt} P(S_T) - e^{-\sum_{i=0}^{T/h-1} \hat r_{ih} h} P(\hat S^h_T)\big)^2\Big] = O(h^2). \]

Proof. For the first claim, it follows from Jensen's inequality that
\[ E\Big[\big(e^{-\int_0^T r_t dt} P(S_T)\big)^2\Big] < c\,E\big[e^{-2\int_0^T r_t dt}\big] < c\,\sqrt{E\big[e^{-4\int_0^T r_t dt}\big]} < \infty. \]
We then focus on the second claim. The classical Taylor expansion, together with the Cauchy-Schwarz inequality, gives
\[ E\Big[\big(e^{-\int_0^T r_t dt} - e^{-\sum_i \hat r_{ih} h}\big)^2\Big] = E\Big[e^{-2\varepsilon}\Big(\int_0^T r_t\,dt - \sum_{i=0}^{T/h-1} \hat r_{ih} h\Big)^2\Big] \le \sqrt{E\Big[\max\big(e^{-4\int_0^T r_t dt},\, e^{-4\sum_i \hat r_{ih} h}\big)\Big]} \cdot \sqrt{E\Big[\Big(\int_0^T r_t\,dt - \sum_{i=0}^{T/h-1} \hat r_{ih} h\Big)^4\Big]}, \]
where $\varepsilon$ lies between $\int_0^T r_t\,dt$ and $\sum_{i=0}^{T/h-1} \hat r_{ih} h$. We have proved in Theorem 4.1 that
\[ E\Big[\Big(\int_0^T r_t\,dt - \sum_{i=0}^{T/h-1} \hat r_{ih} h\Big)^4\Big] = O(h^4). \]
5 Applications

In this section, we apply the results of Section 4 to several well-known interest rate models in finance, including the CIR model, the Hull-White model and the Black-Karasinski model. Throughout, we assume that the payoff $P$ satisfies Assumption 2.

5.1 Heston-CIR model

The CIR model, introduced by Cox, Ingersoll and Ross [5], is represented as
\[
dr_t = \alpha(\beta - r_t)\,dt + \gamma\sqrt{r_t}\,dW_t^3,
\]
where $\alpha, \beta, \gamma > 0$. It is known that, given $r_u$, $u \in [0, t)$, $r_t$ follows a scaled noncentral chi-squared distribution, i.e.,
\[
r_t \overset{d}{=} \frac{\gamma^2\big(1 - e^{-\alpha(t-u)}\big)}{4\alpha}\,
\chi_d^2\bigg(\frac{4\alpha e^{-\alpha(t-u)}}{\gamma^2\big(1 - e^{-\alpha(t-u)}\big)}\,r_u\bigg),
\]
where $\chi_d^2(\lambda)$ denotes a noncentral chi-squared random variable with $d = 4\alpha\beta/\gamma^2$ degrees of freedom and noncentrality parameter $\lambda$ (see Glasserman [9]). For the exact simulation of $r_t$, i.e., $\hat r_t = r_t$, $t \in [0, T]$, we have $\sup_{t\in[0,T]} E(r_t^n) < \infty$ for any $n \in \mathbb{N}$. Hence, it is easy to verify that Assumption 1 is satisfied. As $P(r_t \ge 0,\ \forall t \in [0, T]) = 1$, it follows from Theorem 4.2 that
\[
E\Big[\big(e^{-\int_0^T r_t\,dt}P(S_T) - e^{-\sum_{i=0}^{T/h-1}\hat r_{ih}h}P(\hat S_T^h)\big)^2\Big] = O(h^2)
\tag{15}
\]
for the full parameter regime of the Heston-CIR model.

Since the exact simulation of the CIR process can be time-consuming, several time-discrete schemes for the CIR process are available (see Alfonsi [1] for a discussion). Neuenkirch and Szpruch [15] showed that the BEM scheme and the drift-implicit Milstein scheme preserve the nonnegativity of the CIR process, i.e., $P(\hat r_t \ge 0,\ \forall t \in [0, T]) = 1$, and that both are strongly convergent with order one when $2\alpha\beta/\gamma^2 > 3$. Specifically, for the BEM scheme, Proposition 3.1 in Neuenkirch and Szpruch [15] demonstrates that
\[
E\Big[\max_{i=1,\ldots,T/h}\big|\hat r_{ih} - r_{ih}\big|^p\Big] \le c_p h^p,
\qquad \text{if } 2 \le p < \frac{4}{3}\,\frac{\alpha\beta}{\gamma^2}.
\]
For the drift-implicit Milstein scheme, Lemma 4.1 in Neuenkirch and Szpruch [15] guarantees that
\[
\max_{i=1,\ldots,T/h} E\big|\hat r_{ih} - r_{ih}\big| \le c\,h,
\qquad \text{if } \frac{\alpha\beta}{\gamma^2} > \frac{3}{2}.
\]
These results indicate that the assumptions in Theorem 4.2 might be satisfied; thus, (15) might hold for both the BEM scheme and the drift-implicit Milstein scheme applied to the CIR process.
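For concreteness, the following Python sketch (not the authors' Matlab code) implements the exact CIR transition sampling described above via SciPy's noncentral chi-squared distribution; the parameter values in the usage comment are taken from the CIR-exact row of Table 1, and the function name and grid convention are mine.

```python
# Sketch of the exact CIR transition sampling described above (not the authors' code).
import numpy as np
from scipy.stats import ncx2

def cir_exact_path(r0, alpha, beta, gamma, T, h, rng):
    """Exact samples of the CIR rate at the grid times 0, h, 2h, ..., T."""
    n_steps = int(round(T / h))
    d = 4.0 * alpha * beta / gamma ** 2                                  # degrees of freedom
    scale = gamma ** 2 * (1.0 - np.exp(-alpha * h)) / (4.0 * alpha)      # scale factor
    coef = 4.0 * alpha * np.exp(-alpha * h) / (gamma ** 2 * (1.0 - np.exp(-alpha * h)))
    r = np.empty(n_steps + 1)
    r[0] = r0
    for i in range(n_steps):
        # the noncentrality parameter depends on the current state r[i]
        r[i + 1] = scale * ncx2.rvs(df=d, nc=coef * r[i], random_state=rng)
    return r

# Example usage with the CIR-exact parameters of Table 1 (alpha = 1, beta = 0.06,
# gamma = 0.25, r0 = 0.05):
#   rng = np.random.default_rng(0)
#   path = cir_exact_path(0.05, 1.0, 0.06, 0.25, T=1.0, h=2.0**-7, rng=rng)
```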
5.2 Heston-Hull-White model

The Hull-White model (Hull and White [12]) is of the form
\[
dr_t = \alpha\big(\beta(t) - r_t\big)\,dt + \gamma\,dW_t^3,
\]
where $\alpha, \gamma > 0$ and $\beta : [0, T] \to \mathbb{R}_+$ is continuous. Given $r_u$, $u \in [0, t)$, the interest rate $r_t$ is normally distributed with mean
\[
e^{-\alpha(t-u)}r_u + \alpha\int_u^t e^{-\alpha(t-s)}\beta(s)\,ds
\]
and variance
\[
\frac{\gamma^2}{2\alpha}\big(1 - e^{-2\alpha(t-u)}\big)
\]
(see Glasserman [9], p. 109). In practice, $\beta(t)$ often has a simple structure, so that it is convenient to simulate $r_t$ exactly. Let $\hat r_t = r_t$, $t \in [0, T]$. Since $\sup_{t\in[0,T]} E(r_t^n) < \infty$ for any $n \in \mathbb{N}$, Assumption 1 is satisfied. Furthermore, we have $E[e^{-4\int_0^T r_t\,dt}] < \infty$ (see Glasserman [9], p. 111) and $E[e^{-4\sum_{i=0}^{T/h-1}\hat r_{ih}h}] < \infty$; the latter expectation is finite because $e^{-4\sum_{i=0}^{T/h-1}\hat r_{ih}h}$ is lognormally distributed. Thus, from Theorem 4.3 we find that
\[
E\Big[\big(e^{-\int_0^T r_t\,dt}P(S_T) - e^{-\sum_{i=0}^{T/h-1}\hat r_{ih}h}P(\hat S_T^h)\big)^2\Big] = O(h^2)
\]
for the full parameter regime of the Heston-Hull-White model.

5.3 Heston-Black-Karasinski model

The Black-Karasinski model (Black and Karasinski [3]) can be written as
\[
d\ln r_t = \big(\beta(t) - \alpha\ln r_t\big)\,dt + \gamma\,dW_t^3,
\]
where $\alpha, \gamma > 0$ and $\beta : [0, T] \to \mathbb{R}_+$ is continuous. It follows from Itô's formula that
\[
dr_t = r_t\Big(\beta(t) + \frac{\gamma^2}{2} - \alpha\ln r_t\Big)\,dt + \gamma r_t\,dW_t^3.
\]
Given $r_u$, $u \in [0, t)$, the random variable $r_t$ has a lognormal distribution (see Brigo and Mercurio [4]); hence $r_t$ is usually simulated exactly. Specifically, given $r_u$, $u \in [0, t)$, the logarithm $\ln r_t$ is normally distributed with mean
\[
e^{-\alpha(t-u)}\ln r_u + \int_u^t e^{-\alpha(t-s)}\beta(s)\,ds
\]
and variance
\[
\frac{\gamma^2}{2\alpha}\big(1 - e^{-2\alpha(t-u)}\big).
\]
Let $\hat r_t = r_t$, $t \in [0, T]$. Since every moment of a lognormal random variable is finite, we have $\sup_{t\in[0,T]} E(r_t^{2n}) < \infty$ for any $n \in \mathbb{N}$. From the continuity of $\beta$ and the Cauchy-Schwarz inequality,
\[
\sup_{t\in[0,T]} E\bigg[\Big(r_t\Big(\beta(t) + \frac{\gamma^2}{2} - \alpha\ln r_t\Big)\Big)^{2n}\bigg]
\le c_n \sup_{t\in[0,T]} E\big(r_t^{2n}\big) + c_n \sup_{t\in[0,T]} E\big[(r_t\ln r_t)^{2n}\big]
\le c_n \sup_{t\in[0,T]} E\big(r_t^{2n}\big)
+ c_n \sqrt{\sup_{t\in[0,T]} E\big(r_t^{4n}\big)\cdot \sup_{t\in[0,T]} E\big[(\ln r_t)^{4n}\big]} < \infty.
\]
Thus, Assumption 1 is satisfied. As $r_t$ is nonnegative, we obtain from Theorem 4.2 that
\[
E\Big[\big(e^{-\int_0^T r_t\,dt}P(S_T) - e^{-\sum_{i=0}^{T/h-1}\hat r_{ih}h}P(\hat S_T^h)\big)^2\Big] = O(h^2)
\]
for all parameter regimes of the Heston-Black-Karasinski model.

6 Numerical results

In this section, we conduct numerical experiments to verify the convergence rate derived in Section 4 and then evaluate the efficiency of the log-Euler scheme we develop combined with Rhee and Glynn's unbiased estimators.

We consider the Heston model with three different interest rate models: the CIR model, the Hull-White model and the Black-Karasinski model. For the CIR model, we focus on two methods to simulate the path: one is the exact simulation method and the other is the BEM scheme from Neuenkirch and Szpruch [15]. For the Hull-White model and the Black-Karasinski model, we simulate the paths exactly. We aim to test the convergence rate of
\[
\mathrm{Err}(h) := E\Big[\big(e^{-\int_0^T r_t\,dt}P(S_T) - e^{-\sum_{i=0}^{T/h-1}\hat r_{ih}h}P(\hat S_T^h)\big)^2\Big],
\]
where $P(S_T) := \max(K - S_T, 0)$, $K > 0$, is the payoff of a European put option; note that this payoff satisfies Assumption 2. The model parameters are given in Table 1, and we set $T = 1$ and $S_0 = K = 1$ for all cases. All experiments are performed in Matlab.

              k     θ      σ      ρ     α     β      γ      r0     V0     S0
  CIR-exact   3     0.04   0.25   0.5   1     0.06   0.25   0.05   0.04   1
  CIR-BEM     3     0.04   0.25   0.5   3.5   0.06   0.25   0.05   0.04   1
  HW          2     0.04   0.25   0.5   1     0.05   0.25   0.05   0.04   1
  BK          3     0.04   0.25   0.5   1     0.06   0.25   0.05   0.04   1

Table 1: Parameters of the Heston model with stochastic interest rates.

Figure 1 plots $\log_2(\mathrm{Err}(h))$ against $-\log_2(h)$, with $h = 2^{-n}$, $n = 0, 1, 2, \ldots, 7$. Here, the reference values of $S_T$ and $\int_0^T r_t\,dt$ are approximated using the Euler scheme of Section 2 with a very small step size $2^{-10}$. For the BEM method, $(r_t)_{t\in[0,T]}$ requires an additional approximation, also with step size $2^{-10}$, which shares the same Brownian motion path with the corresponding $(\hat r_{ih})_{i=1,2,\ldots}$. To estimate $\mathrm{Err}(h)$, the number of Monte Carlo samples for each $h$ in each model is at least 0.5 million. As illustrated in Figure 1, the convergence rate in all cases is two, which is consistent with the theoretical convergence rate.

[Figure 1 omitted: plot of $\log_2(\mathrm{Err}(h))$ versus $-\log_2(h)$ for the CIR-exact, CIR-BEM, HW and BK cases.]

Figure 1: Convergence rate for the Heston model with stochastic interest rates. The model parameters are from Table 1.
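As a reduced, self-contained version of this experiment (a Python sketch, not the authors' Matlab study), the fragment below estimates only the discount-factor part of $\mathrm{Err}(h)$, namely $E[(e^{-\int_0^T r_t\,dt} - e^{-\sum_i \hat r_{ih}h})^2]$, for the Hull-White rate with the parameters of the HW row of Table 1. The constant-$\beta(t)$ choice, the reference step $2^{-10}$ standing in for the exact time integral, and the sample size are assumptions of the sketch; successive values of $\log_2$ of the estimate should decrease by roughly 2 per halving of $h$.

```python
# Sketch (not the authors' experiment): Monte Carlo estimate of
#   E[( exp(-int_0^T r_t dt) - exp(-sum_i rhat_{ih} h) )^2]
# for an exactly sampled Hull-White rate, with the time integral replaced by a
# Riemann sum on a fine reference grid (step 2^-10). Each coarse approximation
# reuses (is coupled to) the same fine-grid path.
import numpy as np

def hull_white_exact_paths(n_paths, r0, alpha, beta, gamma, T, h, rng):
    """Exact Hull-White samples on the grid 0, h, ..., T (constant beta assumed)."""
    m = int(round(T / h))
    dec = np.exp(-alpha * h)
    sd = gamma * np.sqrt((1.0 - np.exp(-2.0 * alpha * h)) / (2.0 * alpha))
    r = np.empty((n_paths, m + 1))
    r[:, 0] = r0
    for i in range(m):
        r[:, i + 1] = dec * r[:, i] + beta * (1.0 - dec) + sd * rng.standard_normal(n_paths)
    return r

def discount_factor_error(h, h_ref=2.0 ** -10, n_samples=10_000, seed=0,
                          r0=0.05, alpha=1.0, beta=0.05, gamma=0.25, T=1.0):
    """Estimate of E[(exp(-int r dt) - exp(-sum rhat h))^2] with a coupled fine reference."""
    step = int(round(h / h_ref))
    rng = np.random.default_rng(seed)
    r = hull_white_exact_paths(n_samples, r0, alpha, beta, gamma, T, h_ref, rng)
    fine = np.exp(-np.sum(r[:, :-1], axis=1) * h_ref)      # reference discount factors
    coarse = np.exp(-np.sum(r[:, :-1:step], axis=1) * h)   # coarse Riemann-sum discount factors
    return float(np.mean((fine - coarse) ** 2))

if __name__ == "__main__":
    for n in range(6):
        print(n, np.log2(discount_factor_error(2.0 ** -n)))
```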
Next, we incorporate the log-Euler scheme into Rhee and Glynn's unbiased estimators. As discussed in Section 3, the implementation of the unbiased estimator $Z$ requires setting a distribution for $N$. In this experiment, we simply take $P(N \ge n) = 2^{-3n/2}$, $n \in \mathbb{N}$, so that (2) and (3) are finite. Hence, $Z$ is unbiased with a finite variance and a finite expected computational time. Table 2 reports the root mean square error (RMSE) and the computational time (in seconds) of $Z$ based on $10^6$ samples. Note that for some applications, either the variance or the computational time of $Z$ can be infinite; see Zheng, Blanchet and Glynn [21]. We see from Table 2 that all of these quantities are finite, which again coincides with the theory. This suggests that the log-Euler scheme we develop is well suited to the framework of Rhee and Glynn's unbiased estimators. Furthermore, comparing with the results in Figure 1, we observe that a method for an interest rate model with a large RMSE of $Z$ tends to have a large $\mathrm{Err}(h)$. Thus, to make the RMSE small, we may prefer a method with a small error in L2 norm for the same model parameters.

               RMSE           Computational time
  CIR-exact    2.18 × 10^{-4}    8.34
  CIR-BEM      1.49 × 10^{-4}    8.62
  HW           2.45 × 10^{-4}    5.90
  BK           1.58 × 10^{-4}    9.06

Table 2: The RMSE and computational time (in seconds) of $Z$ based on $10^6$ samples. The model parameters are from Table 1.
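To make the construction of $Z$ concrete, here is a self-contained Python toy (not the authors' Matlab implementation): it samples $N$ with $P(N \ge n) = 2^{-3n/2}$ and forms the coupled sum $Z = \sum_{n=0}^{N}(Y_n - Y_{n-1})/P(N \ge n)$, where, purely for illustration, the level approximation $Y_n$ is taken to be the discount factor $e^{-\sum_i r_{ih}h}$ with $h = T/2^n$, all levels being read off one exactly sampled Hull-White path on the finest required grid so that consecutive levels are coupled. Replacing `coupled_levels` with the full log-Euler payoff approximation would reproduce the estimator used in this section; the constant-$\beta$ Hull-White parameters are assumptions of the sketch.

```python
# Toy sketch of the coupled sum estimator Z (not the authors' code); see the lead-in above.
import numpy as np

def sample_N(rng):
    """Sample N with P(N >= n) = 2^{-3n/2}, n = 0, 1, 2, ..."""
    u = rng.random()
    n = 0
    while u < 2.0 ** (-1.5 * (n + 1)):
        n += 1
    return n

def hull_white_fine_path(r0, alpha, beta, gamma, T, n_max, rng):
    """Exact Hull-White samples (constant beta assumed) on a grid with 2^n_max steps."""
    m = 2 ** n_max
    h = T / m
    dec = np.exp(-alpha * h)
    sd = gamma * np.sqrt((1.0 - np.exp(-2.0 * alpha * h)) / (2.0 * alpha))
    r = np.empty(m + 1)
    r[0] = r0
    for i in range(m):
        r[i + 1] = dec * r[i] + beta * (1.0 - dec) + sd * rng.normal()
    return r

def coupled_levels(r_fine, T, n_max):
    """Toy levels Y_n = exp(-sum_i r_{ih} h), h = T/2^n, n = 0, ..., n_max, all from one path."""
    levels = []
    for n in range(n_max + 1):
        step = 2 ** (n_max - n)
        h = T / 2 ** n
        levels.append(float(np.exp(-np.sum(r_fine[:-1:step]) * h)))
    return levels

def coupled_sum_estimator(rng, r0=0.05, alpha=1.0, beta=0.05, gamma=0.25, T=1.0):
    """One draw of Z = sum_{n=0}^N (Y_n - Y_{n-1}) / P(N >= n), with Y_{-1} = 0."""
    N = sample_N(rng)
    r_fine = hull_white_fine_path(r0, alpha, beta, gamma, T, N, rng)
    levels = coupled_levels(r_fine, T, N)
    z, prev = 0.0, 0.0
    for n in range(N + 1):
        z += (levels[n] - prev) / 2.0 ** (-1.5 * n)   # divide by P(N >= n)
        prev = levels[n]
    return z

# Averaging many independent draws, e.g.
#   rng = np.random.default_rng(0)
#   est = np.mean([coupled_sum_estimator(rng) for _ in range(100_000)]),
# gives an unbiased estimate of E[exp(-int_0^T r_t dt)] in this toy setting.
```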
7 Conclusion

In this article, we develop a semi-exact log-Euler scheme for the Heston model with stochastic interest rates and analyse the relevant convergence rate in L2 norm. The SDEs of the Heston model with stochastic interest rates can be divided into two components: the Heston component and the interest rate component. Under mild assumptions on the interest rate component, but with no assumption on the Heston component, we show that the convergence rate is one, which allows us to easily incorporate the log-Euler scheme into Rhee and Glynn's unbiased estimators. Furthermore, we demonstrate that the log-Euler scheme and the convergence analysis apply to a large class of interest rate models.

There are two directions of extension that might be interesting. The log-Euler scheme we consider is based on the assumption that the driving Brownian motion $W^3$ of the interest rate model is independent of the driving Brownian motions $W^1$ and $W^2$ in the SDEs of $S$ and $V$. One direction is to extend the scheme to the case without this assumption, i.e., the full constant-correlation case, and to analyse the convergence rate; this is nontrivial because the random variable $N$ and the process $r$ in equation (1) are then no longer independent. The other direction is to extend the payoff $P$ to more complicated cases, such as those in Cozma, Mariapragassam and Reisinger [6].

References

[1] Alfonsi, A. (2005). On the discretization schemes for the CIR (and Bessel squared) processes. Monte Carlo Methods and Applications, 11(4), 355-384.

[2] Bakshi, S., Cao, C. and Chen, Z. (2000). Pricing and hedging long-term options. Journal of Econometrics, 94, 2003-2049.

[3] Black, F. and Karasinski, P. (1991). Bond and option pricing when short rates are lognormal. Financial Analysts Journal, 52-59.

[4] Brigo, D. and Mercurio, F. (2006). Interest Rate Models - Theory and Practice: With Smile, Inflation and Credit. Springer Verlag.

[5] Cox, J., Ingersoll, J. and Ross, S. (1985). A theory of the term structure of interest rates. Econometrica, 53(2), 385-407.

[6] Cozma, A., Mariapragassam, M. and Reisinger, C. (2018). Convergence of an Euler scheme for a hybrid stochastic-local volatility model with stochastic rates in foreign exchange markets. SIAM Journal on Financial Mathematics, 9, 127-170.

[7] Dufresne, D. (2001). The integrated square-root process. Working Paper, University of Montreal. https://minerva-access.unimelb.edu.au/handle/11343/33693.

[8] Giles, M. (2008). Multilevel Monte Carlo path simulation. Operations Research, 56(3), 607-617.

[9] Glasserman, P. (2003). Monte Carlo Methods in Financial Engineering. Springer Science and Business Media, New York.

[10] Grzelak, L.A. and Oosterlee, C.W. (2011). On the Heston model with stochastic interest rates. SIAM Journal on Financial Mathematics, 2(1), 255-286.

[11] Heston, S. (1993). A closed-form solution for options with stochastic volatility with applications to bond and currency options. Review of Financial Studies, 6(2), 327-343.

[12] Hull, J.C. and White, A. (1990). Pricing interest rate derivative securities. Review of Financial Studies, 3(4), 573-592.

[13] Kloeden, P. and Platen, E. (1999). Numerical Solution of Stochastic Differential Equations, 3rd edition. Springer Verlag, New York.

[14] Mickel, A. and Neuenkirch, A. (2021). The weak convergence rate of two semi-exact discretization schemes for the Heston model. Risks, 9(1), 23.

[15] Neuenkirch, A. and Szpruch, L. (2014). First order strong approximations of scalar SDEs with values in a domain. Numerische Mathematik, 128, 103-136.

[16] Protter, P. (2005). Stochastic Integration and Differential Equations, 2nd edition. Springer.

[17] Rhee, C.-H. and Glynn, P.W. (2015). Unbiased estimation with square root convergence for SDE models. Operations Research, 63(5), 1026-1043.

[18] Van Haastrecht, A. and Pelsser, A. (2011). Generic pricing of FX, inflation and stock options under stochastic interest rates and stochastic volatility. Quantitative Finance, 11, 665-691.

[19] Zheng, C. (2017). Weak convergence rate of a time-discrete scheme for the Heston stochastic volatility model. SIAM Journal on Numerical Analysis, 55(3), 1243-1263.

[20] Zheng, C. (2020). Multilevel Monte Carlo simulation for the Heston stochastic volatility model. Preprint at SSRN: http://dx.doi.org/10.2139/ssrn.2804894.

[21] Zheng, Z., Blanchet, J. and Glynn, P. (2018). Rates of convergence and CLTs for subcanonical debiased MLMC. In: Owen, A. and Glynn, P. (eds), Monte Carlo and Quasi-Monte Carlo Methods in Scientific Computing 2016.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='CP] 28 Jan 2023 Unbiased estimators for the Heston model with stochastic interest rates Chao Zheng ∗and Jiangtao Pan School of Data Sciences, Zhejiang University of Finance and Economics, Hangzhou, China Abstract We combine the unbiased estimators in Rhee and Glynn (Operations Research: 63(5), 1026-1043, 2015) and the Heston model with stochastic interest rates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Specif- ically, we first develop a semi-exact log-Euler scheme for the Heston model with stochastic interest rates, and then, under mild assumptions, we show that the con- vergence rate in L2 norm is O(h), where h is the step size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The result applies to a large class of models, such as the Heston-Hull-While model, the Heston-CIR model and the Heston-Black-Karasinski model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Numerical experiments confirm our theoretical convergence rate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Keywords: Heston model,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' stochastic interest rates,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' unbiased estimators,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' conver- gence rate,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Heston-Hull-While model,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Heston-CIR model AMS subject classifications (2000): 60H35,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 65C30,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 91G60 1 Introduction The classical Heston model (Heston [11]) is one of the fundamental models in finance,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' and it has been widely applied in various financial markets,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' such as the equity,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' the fixed income and the foreign exchange markets,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' due to its tractability in modelling the term structure of implied volatility.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' However, in this model, the interest rate is constant, and this assumption is often not appropriate for long-maturity options, because the long-term behaviour of the interest rate is typically far from being constant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' This phenomenon has been empirically investigated by Bakshi, Cao and Chen [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' A natural extension of the Heston model is to add a stochastic interest rate, which is usually referred to as the Heston model with stochastic interest rates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' There are several contributions in this direction, such as Grzelak and Oosterlee [10], Van Haastrecht and Pelsser [18] and references therein.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' ∗Email: chao.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='zheng12@gmail.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='com.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' This research is supported by National Natural Science Founda- tion of China (No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 11801504).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 1 Under the Heston model with stochastic interest rates, the price of an option can be written as E � e− ´ T 0 rtdtP(S) � where S is the solution to the Heston model with stochastic interest rates and rt is the underlying interest rate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Here, P denotes the payoff functional of an option.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' We are interested in calculating this expectation, as for the majority of options, there are no closed-form formulas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' A classical approach is to use a Monte Carlo method associated with a time-discrete scheme on S for an approximate value.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Recently, Rhee and Glynn [17] proposed several unbiased Monte Carlo estimators based on a randomization idea for a stochastic differential equation, which are unbiased versions of multilevel Monte Carlo estimators by Giles [8].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' These unbiased estimators have a clear advantage over the standard Monte Carlo estimator, as the latter is typically biased when S has to be approximated through a time-discrete scheme.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' To combine Rhee and Glynn’s estimators and the Heston model with stochastic interest rates, it is essential to develop a numeri- cal scheme with a good convergence rate in L2 norm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' However, the standard theorems, such as those in Kloeden and Platen [13], require that the drift and diffusion coefficients satisfy the global Lipschitz and linear growth assumptions, which are not satisfied by the Heston model with stochastic interest rates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Research on developing time-discrete schemes for the Heston model with stochastic interest rates is scarce.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Cozma, Mariapra- gassam and Reisinger [6] proposed a different log-Euler scheme for the stochastic-local volatility model with stochastic rates, which includes the model we consider, and they demonstrated strong convergence without providing a rate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' This is the only reference we are aware of concerning the convergence of Monte Carlo algorithms for the Heston model with stochastic interest rates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' In this article, we develop a semi-exact log-Euler scheme for the Heston model with stochastic interest rates, where the driven Brownian motion for interest rate models is independent of the driven Brownian motions for the Heston component.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The scheme is an extension of those in Mickel and Neuenkirch [14] and Zheng [19] for the classical Heston model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Under mild assumptions on interest rate models and the assumption that the payoff of an option is Lipschitz continuous and bounded, we show that the underlying scheme converges with order one in L2 norm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' There are two advantages of the scheme we develop.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' One is that the convergence rate is free of Feller’s index, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=', the convergence rate is valid for the full range of parameters in the Heston component of the Heston model with stochastic interest rates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The other is that the convergence rate is higher than the usual convergence rate (one-half in L2 norm) of the standard Euler scheme under standard assumptions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' When a numerical scheme has a convergence rate higher than one-half, it is convenient to combine it into Rhee and Glynn’s unbiased estimators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Our result applies to a large class of models, including the Heston-Hull-While model, the Heston-CIR model and the Heston-Black-Karasinski model, among which the Heston- Hull-White model and the Heston-CIR model are particularly attractive in practical applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' We refer readers to Grzelak and Oosterlee [10] for more discussions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The remainder of the article is organized as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' In section 2, we review the Heston model with stochastic interest rates and develop a log-Euler scheme for it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Section 2 3 reviews the unbiased estimators from Rhee and Glynn [17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' In section 4, we derive the relevant convergence rate under several mild assumptions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Section 5 illustrates numerical results to support our theoretical analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 2 Heston model with stochastic interest rates Let (Ω, F, (Ft)t≥0, P) be a filtered probability space satisfying the usual assumptions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The Heston model with stochastic interest rates is of the form dSt = rtStdt + � VtSt(ρdW 1 t + � 1 − ρ2dW 2 t ) dVt = k(θ − Vt)dt + σ � VtdW 1 t , where (W 1 t )t≥0 and (W 2 t )t≥0 are two independent Ft-adapted Brownian motions and the parameters k, θ, σ > 0 and ρ ∈ [−1, 1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Here, (rt)t≥0 is a stochastic interest rate.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The classical interest rate models and their generalizations can be found at Brigo and Mercurio [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Among them, a large class of interest rate models can be written as drt = µ(t, rt)dt + φ(t, rt)dW 3 t , where µ, φ : [0, T] × R → R are continuous functions and (W 3 t )t≥0 is a Ft-adapted Brownian motion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' We assume that (W 3 t )t≥0 is independent of (W 1 t )t≥0 and (W 2 t )t≥0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Furthermore, we assume that there is a unique solution to the equation of rt above.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Let Xt = ln(St).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' By using Itˆo’s lemma, we have dXt = � rt − 1 2Vt � dt + � Vt � ρdW 1 t + � 1 − ρ2dW 2 t � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Then substituting the equation of Vt into the equation above, we obtain dXt = �� rt − kρθ σ � + �kρ σ − 1 2 � Vt � dt + ρ σdVt + � 1 − ρ2� VtdW 2 t .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Since (Vt)t≥0 is independent of (W 2 t )t≥0, the stochastic integral ´ T 0 √VtdW 2 t is normally distributed with mean 0 and variance ´ T 0 Vtdt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Therefore, the solution at any finite time horizon T > 0 can be written as XT = X0 + �ˆ T 0 rtdt + �ρk σ − 1 2 � ˆ T 0 Vtdt + ρ σ (VT − V0 − kθT) + � 1 − ρ2 �ˆ T 0 VtdtN \uf8f9 \uf8fb (1) where N is a standard normal random variable, that is independent of (Vt)t∈[0,T].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Note that N is also independent of (rt)t∈[0,T], because the driving Brownian motion (W 3 t )t∈[0,T] of (rt)t∈[0,T] is independent of (W 2 t )t∈[0,T] and (V 2 t )t∈[0,T].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' We see that there are several integrals in equation (1) to be approximated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 3 It is known that Vt follows a scaled noncentral chi-squared distribution given Vu for any u ∈ [0, t), i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=', Vt d= σ2(1 − e−k(t−u)) 4k χ2 d � 4ke−k(t−u) σ2(1 − e−k(t−u))Vu � , where χ2 d(λ) denotes a non-central chi-squared random variable with degrees of freedom d = 4kθ σ2 > 0 and noncentrality parameter λ > 0 (see Glasserman [9]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Hence, Vt can be sampled exactly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Let (ˆrih)i=1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='.,T/h be an approximate path of (rt)t∈[0,T].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' It is convenient to approximate ˆ T 0 rtdt ≈ T/h−1 � i=0 ˆrihh, ˆ T 0 Vtdt ≈ T/h−1 � i=0 Vihh, using the Euler scheme based on step size h.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' We denote by ˆXh T the approximated solution of XT .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Let ˆSh T := e ˆ Xh T , so that ˆSh T is an approximation of ST.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 3 Unbiased estimators for SDEs In this section, we review the unbiased estimators introduced in Rhee and Glynn [17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The prices of many options can be expressed as E(Y ) := E � e− ´ T 0 rtdtP(ST ) � where P is the payoff functional and Y ∈ L2 (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=', E(Y 2) < ∞).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' To estimate this expectation, Rhee and Glynn [17] proposed an estimator Z = N � n=0 ∆n/P(N ≥ n) where ∆n = Yn − Yn−1 and Yn, n ∈ N, is an approximation of Y with step size T/2n and Y−1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' In this article, we let Yn = e− �T/h−1 i=0 ˆrihhP( ˆSh T ), h = T/2n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Here, N is a nonnegative integer-valued random variable that is independent of Yn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' This estimator is usually refereed to as the coupled sum estimator.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' There are other unbiased estimators constructed in a similar way (see Rhee and Glynn [17]).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Suppose that Yn converges to Y in L2 norm as n → ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Theorem 1 of Rhee and Glynn [17] showed that if ∞ � n=1 E � (Yn−1 − Y )2� P(N ≥ n) < ∞ (2) 4 then Z is an unbiased estimator of E(Y ) (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=', E(Z) = E(Y )) with a finite variance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Furthermore, the average computational time of Z is proportional to ∞ � n=0 2nP(N ≥ n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' (3) Therefore, if E � (Yn − Y )2� = O(2−2np) = O(h2p) with p > 1/2 (Here, p is the con- vergence rate in L2 norm), we can easily construct a distribution for N such that P(N ≥ n) = O(2−n(p+1/2)) to ensure that (2) and (3) are finite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The optimal distri- bution of N can be calculated based on minimizing the product of the variance and the average computational time of Z;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' see Rhee and Glynn [17] for more discussions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Hence, it is important to investigate the convergence rate of E � (Yn − Y )2� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 4 Convergence analysis Recall that (rt)t∈[0,T] follows the stochastic differential equation drt = µ(t, rt)dt + φ(t, rt)dW 3 t , where (W 3 t )t∈[0,T] is independent of (W 1 t )t∈[0,T] and (W 2 t )t∈[0,T].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Let c and cn be constants regardless of their values, where cn relies on n ∈ N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Our analysis throughout this article is based on the following assumption: Assumption 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' For any n ∈ N, it holds that sup t∈[0,T] E � (µ(t, rt))2n� < ∞, sup t∈[0,T] E � (φ(t, rt))2n� < ∞, and the approximate interest rate (ˆrih)i=1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='.,T/h satisfies max i=1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='.,T/hE � (ˆrih − rih)2n� < cnh2n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Remark 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' A typical time-discrete scheme that may satisfy Assumption 1 is the Mil- stein scheme.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Under some standard assumptions on model coefficients (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=', Lipschitz continuity, linear growth), the convergence rate in L2 norm is one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Under Assumption 1, we have E �� XT − ˆXh T �2n� = O(h2n), ∀n ∈ N+.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 5 Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Straightforward calculation verifies that E �� XT − ˆXh T �2n� ≤ cnE \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 rtdt − T/h−1 � i=0 ˆrihh \uf8f6 \uf8f8 2n\uf8f9 \uf8fb + cnE \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 Vtdt − T/h−1 � i=0 Vihh \uf8f6 \uf8f8 2n\uf8f9 \uf8fb + cnE \uf8ee \uf8ef\uf8f0 \uf8eb \uf8ec \uf8ed �ˆ T 0 Vtdt − � � � � T/h−1 � i=0 Vihh \uf8f6 \uf8f7 \uf8f8 2n\uf8f9 \uf8fa\uf8fb ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' (4) where the Cauchy-Schwarz inequality implies that E \uf8ee \uf8ef\uf8f0 \uf8eb \uf8ec \uf8ed �ˆ T 0 Vtdt − � � � � T/h−1 � i=0 Vihh \uf8f6 \uf8f7 \uf8f8 2n\uf8f9 \uf8fa\uf8fb = E \uf8ee \uf8ef\uf8f0 \uf8eb \uf8ec \uf8ed ´ T 0 Vtdt − �T/h−1 i=0 Vihh �´ T 0 Vtdt + ��T/h−1 i=0 Vihh \uf8f6 \uf8f7 \uf8f8 2n\uf8f9 \uf8fa\uf8fb ≤ � � � � �E \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 Vtdt − T/h−1 � i=0 Vihh \uf8f6 \uf8f8 4n\uf8f9 \uf8fb · � � � � �E \uf8ee \uf8f0 � 1 ´ T 0 Vtdt �2n\uf8f9 \uf8fb.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' (5) From Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='1(a) in Dufresne [7], we learn that E \uf8ee \uf8f0 � 1 ´ T 0 Vtdt �2n\uf8f9 \uf8fb < ∞ for any n ∈ N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Thus, it suffices to investigate the two quantities E \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 Vtdt − T/h−1 � i=0 Vihh \uf8f6 \uf8f8 2n\uf8f9 \uf8fb , E \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 rtdt − T/h−1 � i=0 ˆrihh \uf8f6 \uf8f8 2n\uf8f9 \uf8fb .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Let us focus on the quantity of rt, because the analysis of the quantity of Vt is similar.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' It holds that E \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 rtdt − T/h−1 � i=0 ˆrihh \uf8f6 \uf8f8 2n\uf8f9 \uf8fb ≤ cnE \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 rtdt − T/h−1 � i=0 rihh \uf8f6 \uf8f8 2n\uf8f9 \uf8fb + cnE \uf8ee \uf8f0 \uf8eb \uf8ed T/h−1 � i=0 (ˆrih − rih)h \uf8f6 \uf8f8 2n\uf8f9 \uf8fb .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' (6) 6 Let η(t) := max{lh : lh ≤ t, l = 0, 1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' For the first term of (6), we have E \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 rtdt − T/h−1 � i=0 rihh \uf8f6 \uf8f8 2n\uf8f9 \uf8fb = E ��ˆ T 0 rη(t)dt − ˆ T 0 rtdt �2n� ≤ cnE \uf8ee \uf8f0 �ˆ T 0 �ˆ t η(t) µ(u, ru)du � dt �2n\uf8f9 \uf8fb + cnE \uf8ee \uf8f0 �ˆ T 0 �ˆ t η(t) φ(u, ru)dW 3 u � dt �2n\uf8f9 \uf8fb .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' (7) An application of the Fubini theorem yields ˆ T 0 �ˆ t η(t) µ(u, ru)du � dt = ˆ T 0 �ˆ η(u)+h u µ(u, ru)dt � du = h ˆ T 0 � 1 + η(u) − u h � µ(u, ru)du.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Hence, with substitution of the equation above, it follows from Jensen’s inequality that E \uf8ee \uf8f0 �ˆ T 0 �ˆ t η(t) µ(u, ru)du � dt �2n\uf8f9 \uf8fb ≤ cnh2n ˆ T 0 E[µ2n(u, ru)]du = O(h2n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' (8) Furthermore, we obtain from the stochastic Fubini theorem (see Theorem 65, Protter [16]) and the Burkholder-Davies-Gundy inequality that E \uf8ee \uf8f0 �ˆ T 0 �ˆ t η(t) φ(u, ru)dW 3 u � dt �2n\uf8f9 \uf8fb = h2nE ��ˆ T 0 � 1 + η(u) − u h � φ(u, ru)dW 3 u �2n� ≤ cnh2nE ��ˆ T 0 φ2(u, ru)du �n� ≤ cnh2n ˆ T 0 E[φ2n(u, ru)]du = O(h2n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' (9) Therefore, by using (8) and (9), we show that (7) is O(h2n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' For the second term of (6), by Jensen’s inequality, we have E \uf8ee \uf8f0 \uf8eb \uf8ed T/h−1 � i=0 (ˆrih − rih)h \uf8f6 \uf8f8 2n\uf8f9 \uf8fb ≤ T/h−1 � i=0 E � (ˆrih − rih)2n� h = O(h2n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' (10) 7 Thus, combining (7) and (10) into (6), we conclude that E ��´ T 0 rtdt − �T/h−1 i=0 ˆrihh �2n� = O(h2n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' It can be proven analogously that E ��´ T 0 Vtdt − �T/h−1 i=0 Vihh �2n� = O(h2n).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Finally, we substitute (5) and the two O(h2n) terms above into (4) to complete the proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Then we proceed to the convergence rate of the underlying log-Euler scheme to approximate the price of an option.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' This requires us to impose a boundedness assumption on the option payoff.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Assumption 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The payoff P : [0, +∞) → R is Lipschitz continuous and there exists a constant C > 0, such that P(U) = P(C) for all U > C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Under Assumption 2, it holds that |P(U1) − P(U2)| ≤ c| ln U1 − ln U2| (11) for all U1, U2 ∈ [0, +∞), see Thereom 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='1 in Zheng [19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The payoff that satisfies As- sumption 2 is bounded, which is typically suitable for a put-style option.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The put-style option becomes worthless when the price of the underlying asset ST is sufficiently high.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' For example, the standard European put option has the payoff P(ST ) := max{K−ST , 0} with K > 0, which satisfies Assumption 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Suppose that Assumptions 1 and 2 are satisfied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Suppose that (rt)t∈[0,T] and its approximation (ˆrih)i=1,2,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='.,T/h are nonnegative.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Then, we have E �� e− ´ T 0 rtdtP(ST ) �2� < ∞ and E �� e− ´ T 0 rtdtP(ST ) − e− �T/h−1 i=0 ˆrihhP( ˆSh T ) �2� = O(h2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' For the first term, since (rt)t∈[0,T] is nonnegative and Assumption 2 implies that P is bounded, it is trivial that E �� e− ´ T 0 rtdtP(ST ) �2� < ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' For the second term, we have E �� e− ´ T 0 rtdtP(ST ) − e− �T/h−1 i=0 ˆrihhP( ˆSh T ) �2� = E �� e− ´ T 0 rtdt � P(ST ) − P( ˆSh T ) � + P( ˆSh T ) � e− ´ T 0 rtdt − e− �T/h−1 i=0 ˆrihh��2� ≤ 2E � e−2 ´ T 0 rtdt � P(ST ) − P( ˆSh T ) �2� + 2E � P 2( ˆSh T ) � e− ´ T 0 rtdt − e− �T/h−1 i=0 ˆrihh�2� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' (12) 8 As it holds that |e−x − e−y| ≤ |x − y| for any x, y ≥ 0 and the processes (rt)t∈[0,T] and (ˆrih)i=1,2,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='.,T/h are nonnegative, we obtain E � P 2( ˆSh T ) � e− ´ T 0 rtdt − e− �T/h−1 i=0 ˆrihh�2� ≤ cE \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 rtdt − T/h−1 � i=0 ˆrihh \uf8f6 \uf8f8 2\uf8f9 \uf8fb = O(h2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' (13) The right-hand side of (13) is O(h2);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' see the proof of Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' By using the Cauchy- Schwarz inequality, inequality (11) and Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='1, we have E � e−2 ´ T 0 rtdt � P(ST ) − P( ˆSh T ) �2� ≤ � E � e−4 ´ T 0 rtdt� � E �� P(ST ) − P( ˆSh T ) �4� ≤ c � E �� ln(ST ) − ln( ˆSh T ) �4� = O(h2), (14) where E � e−4 ´ T 0 rtdt� < ∞, since rt is nonnegative.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' With (13) and (14) substituted into (12), the proof is complete.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' We emphasize that Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='2 is for nonnegative interest rate processes, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=', P(rt ≥ 0, ∀t ∈ [0, T]) = 1, which are satisfied by a large class of interest rate models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' However, if rt can be negative, for example when rt follows the Hull-White model, then inequality (13) may not be satisfied.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' To address this problem, we establish Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='3 below: Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Suppose that Assumptions 1 and Assumption 2 are satisfied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Suppose that E � e−4 ´ T 0 rtdt� < ∞ and E � e−4 �T/h−1 i=0 ˆrihh� < ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Then, we have E �� e− ´ T 0 rtdtP(ST ) �2� < ∞ and E �� e− ´ T 0 rtdtP(ST ) − e− �T/h−1 i=0 ˆrihhP( ˆSh T ) �2� = O(h2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' For the first term, it follows from Jensen’s inequality that E �� e− ´ T 0 rtdtP(ST ) �2� < cE � e−2 ´ T 0 rtdt� < c � E � e−4 ´ T 0 rtdt� < ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 9 Then, we focus on the second term.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The classical Taylor expansion, together with the Cauchy-Schwarz inequality, gives E �� e− ´ T 0 rtdt − e− �T/h−1 i=0 ˆrihh�2� = E \uf8ee \uf8f0e−2ε \uf8eb \uf8ed ˆ T 0 rtdt − T/h−1 � i=0 ˆrihh \uf8f6 \uf8f8 2\uf8f9 \uf8fb ≤ � E � max(e−4 ´ T 0 rtdt, e−4 �T/h−1 i=0 ˆrihh) � � � � � �E \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 rtdt − T/h−1 � i=0 ˆrihh \uf8f6 \uf8f8 4\uf8f9 \uf8fb where ε is between ´ T 0 rtdt and �T/h−1 i=0 ˆrihh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' We have proved in Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='1 that E \uf8ee \uf8f0 \uf8eb \uf8ed ˆ T 0 rtdt − T/h−1 � i=0 ˆrihh \uf8f6 \uf8f8 4\uf8f9 \uf8fb = O(h4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Note that E � max(e−4 ´ T 0 rtdt, e−4 �T/h−1 i=0 ˆrihh) � ≤ E � e−4 ´ T 0 rtdt� + E � e−4 �T/h−1 i=0 ˆrihh� < ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Consequently E �� e− ´ T 0 rtdt − e− �T/h−1 i=0 ˆrihh�2� = O(h2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' The remaining of the proof can be easily completed by using the inequality E �� e− ´ T 0 rtdtP(ST ) − e− �T/h−1 i=0 ˆrihhP( ˆSh T ) �2� ≤ 2E � e−2 ´ T 0 rtdt � P(ST ) − P( ˆSh T ) �2� + 2E � P 2( ˆSh T ) � e− ´ T 0 rtdt − e− �T/h−1 i=0 ˆrihh�2� and following similar steps as in Theorem 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 5 Applications In this section, we apply our results from Section 4 to several well-known interest rate models in finance, including the CIR model, the Hull-White model and the Black- Karasinski model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' We always assume that the payoff P satisfies Assumption 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='1 Heston-CIR model The CIR model, which was introduced by Cox, Ingersoll and Ross [5], is represented as drt = α(β − rt)dt + γ√rtdW 3 t , 10 where α, β, γ > 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' It is known that rt follows a scaled noncentral chi-squared distribution given ru, u ∈ [0, t), i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=', rt d= γ2(1 − e−α(t−u)) 4α χ2 d � 4αe−k(t−u) σ2(1 − e−α(t−u))ru � , where χ2 d(λ) denotes a noncentral chi-squared random variable with degrees of freedom d = 4αβ γ2 and noncentrality parameter λ (see Glasserman [9]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' For the exact simulation of rt, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=', ˆrt = rt, t ∈ [0, T], we have supt∈[0,T] E(rn t ) < ∞ for any n ∈ N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Hence, it is easy to verify that Assumption 1 is satisfied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' As P(rt ≥ 0, ∀t ∈ [0, T]) = 1, it follows from Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content='2 that E �� e− ´ T 0 rtdtP(ST ) − e− �T/h−1 i=0 ˆrihhP( ˆSh T ) �2� = O(h2) (15) for the full parameter regime of the Heston-CIR model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/hdFLT4oBgHgl3EQfaC_d/content/2301.12072v1.pdf'} +page_content=' Since the exact simulation of the CIR process can be time-consuming, there are sev- eral time-discrete schemes for the CIR process (see Alfonsi [1] for discussions).' 
Neuenkirch and Szpruch [15] showed that the BEM scheme and the drift-implicit Milstein scheme preserve the nonnegativity of the CIR process, i.e., $P(\hat r_t \ge 0,\ \forall t \in [0, T]) = 1$, and that both of them are strongly convergent with order one when $2\alpha\beta/\gamma^2 > 3$. Specifically, for the BEM scheme, Proposition 3.1 in Neuenkirch and Szpruch [15] demonstrates that
$E\big[\max_{i=1,\dots,T/h}(\hat r_{ih} - r_{ih})^p\big] < c_n h^p, \quad \text{if } 2 \le p < \tfrac{4}{3}\tfrac{\alpha\beta}{\gamma^2}.$
For the drift-implicit Milstein scheme, Lemma 4.1 in Neuenkirch and Szpruch [15] guarantees that
$\max_{i=1,\dots,T/h} E\,|\hat r_{ih} - r_{ih}| < ch, \quad \text{if } \tfrac{\alpha\beta}{\gamma^2} > \tfrac{3}{2}.$
These results indicate that the assumptions in Theorem 4.2 might be satisfied; thus, (15) might hold for both the BEM scheme and the drift-implicit Milstein scheme applied to the CIR process.

5.2 Heston-Hull-White model

The Hull-White model (Hull and White [12]) is of the form
$dr_t = \alpha(\beta(t) - r_t)\,dt + \gamma\,dW_t^3,$
where $\alpha, \gamma > 0$ and $\beta : [0, T] \to \mathbb{R}_+$ is continuous. Given $r_u$, $u \in [0, t)$, the interest rate $r_t$ is normally distributed with mean
$e^{-\alpha(t-u)} r_u + \alpha \int_u^t e^{-\alpha(t-s)}\beta(s)\,ds$
and variance $\tfrac{\gamma^2}{2\alpha}(1 - e^{-2\alpha(t-u)})$ (see Glasserman [9], p. 109). In practice, $\beta(t)$ often has a simple structure, so it is convenient to simulate $r_t$ exactly.
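For concreteness, here is a hedged Python sketch of one exact Hull-White transition draw built from the mean and variance above. To keep the deterministic integral in closed form it assumes a constant level $\beta(t) \equiv \beta$, which is an illustrative simplification rather than the paper's general setting, and the function name is our own.

import numpy as np

def sample_hull_white_exact(r_u, dt, alpha, gamma, beta_const, rng=None):
    """Draw r_{u+dt} given r_u for dr = alpha*(beta(t) - r)dt + gamma*dW,
    assuming beta(t) = beta_const so that alpha * integral e^{-alpha(t-s)} beta ds
    reduces to beta_const * (1 - e^{-alpha*dt})."""
    rng = rng or np.random.default_rng()
    decay = np.exp(-alpha * dt)
    mean = r_u * decay + beta_const * (1.0 - decay)
    var = gamma**2 / (2.0 * alpha) * (1.0 - decay**2)
    return rng.normal(mean, np.sqrt(var))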
Let $\hat r_t = r_t$, $t \in [0, T]$. Since it holds that $\sup_{t\in[0,T]} E(r_t^n) < \infty$ for any $n \in \mathbb{N}$, Assumption 1 is satisfied. Furthermore, we have $E[e^{-4\int_0^T r_t\,dt}] < \infty$ (see Glasserman [9], p. 111) and $E[e^{-4\sum_{i=0}^{T/h-1}\hat r_{ih}h}] < \infty$; the latter expectation is finite because $e^{-4\sum_{i=0}^{T/h-1}\hat r_{ih}h}$ is lognormally distributed. Thus, from Theorem 4.3 we find that
$E\big[(e^{-\int_0^T r_t\,dt}P(S_T) - e^{-\sum_{i=0}^{T/h-1}\hat r_{ih}h}P(\hat S_T^h))^2\big] = O(h^2)$
for the full parameter regime of the Heston-Hull-White model.

5.3 Heston-Black-Karasinski model

The Black-Karasinski model (Black and Karasinski [3]) can be written as
$d\ln r_t = (\beta(t) - \alpha\ln r_t)\,dt + \gamma\,dW_t^3,$
where $\alpha, \gamma > 0$ and $\beta : [0, T] \to \mathbb{R}_+$ is continuous. It follows from Itô's formula that
$dr_t = r_t\big(\beta(t) + \tfrac{\gamma^2}{2} - \alpha\ln r_t\big)dt + \gamma r_t\,dW_t^3.$
Given $r_u$, $u \in [0, t)$, the random variable $r_t$ has a lognormal distribution (see Brigo and Mercurio [4]); hence, $r_t$ is usually simulated exactly. Specifically, given $r_u$, $u \in [0, t)$, the logarithm $\ln r_t$ is normally distributed with mean
$e^{-\alpha(t-u)}\ln r_u + \int_u^t e^{-\alpha(t-s)}\beta(s)\,ds$
and variance $\tfrac{\gamma^2}{2\alpha}(1 - e^{-2\alpha(t-u)})$. Let $\hat r_t = r_t$, $t \in [0, T]$. Since each moment of a lognormal random variable is finite, we have $\sup_{t\in[0,T]} E(r_t^{2n}) < \infty$ for any $n \in \mathbb{N}$. It then follows from the continuity of $\beta$ and the Cauchy-Schwarz inequality that
$\sup_{t\in[0,T]} E\big[\big(r_t(\beta(t) + \tfrac{\gamma^2}{2} - \alpha\ln r_t)\big)^{2n}\big] \le c_n \sup_{t\in[0,T]} E(r_t^{2n}) + c_n \sup_{t\in[0,T]} E\big[(r_t\ln r_t)^{2n}\big] \le c_n \sup_{t\in[0,T]} E(r_t^{2n}) + c_n \sqrt{\sup_{t\in[0,T]} E(r_t^{4n}) \cdot \sup_{t\in[0,T]} E\big[(\ln r_t)^{4n}\big]} < \infty.$
Thus, Assumption 1 is satisfied. As $r_t$ is nonnegative, we obtain from Theorem 4.2 that
$E\big[(e^{-\int_0^T r_t\,dt}P(S_T) - e^{-\sum_{i=0}^{T/h-1}\hat r_{ih}h}P(\hat S_T^h))^2\big] = O(h^2)$
for all parameter regimes of the Heston-Black-Karasinski model.

Table 1: Parameters of the Heston model with stochastic interest rates.

             k     theta  sigma  rho   alpha  beta   gamma  r0     V0     S0
CIR-exact    3     0.04   0.25   0.5   1      0.06   0.25   0.05   0.04   1
CIR-BEM      3     0.04   0.25   0.5   3.5    0.06   0.25   0.05   0.04   1
HW           2     0.04   0.25   0.5   1      0.05   0.25   0.05   0.04   1
BK           3     0.04   0.25   0.5   1      0.06   0.25   0.05   0.04   1

6 Numerical results

In this section, we conduct numerical experiments to verify the convergence rate derived in Section 4 and then evaluate the efficiency of the log-Euler scheme we develop combined with Rhee and Glynn's unbiased estimators. We consider the Heston model with three different interest rate models: the CIR model, the Hull-White model and the Black-Karasinski model. For the CIR model, we focus on two methods to simulate the path: one is the exact simulation method and the other is the BEM scheme from Neuenkirch and Szpruch [15]. For the Hull-White model and the Black-Karasinski model, we simulate the paths exactly. We aim to test the convergence rate of
$\mathrm{Err}(h) := E\big[(e^{-\int_0^T r_t\,dt}P(S_T) - e^{-\sum_{i=0}^{T/h-1}\hat r_{ih}h}P(\hat S_T^h))^2\big],$
where $P(S_T) := \max(K - S_T, 0)$, $K > 0$, is the payoff of a European put option. Note that the payoff of such an option satisfies Assumption 2. The model parameters are given in Table 1, and we set $T = 1$ and $S_0 = K = 1$ for all cases. All experiments are performed in Matlab.
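To connect $\mathrm{Err}(h)$ to the plotted slopes, the following Python sketch shows one way such a convergence study could be organized. Here `simulate_pair` is a hypothetical stand-in for a coupled simulation that returns the fine-step reference value and the step-$h$ approximation of the discounted payoff on the same Brownian paths; the paper's own experiments are implemented in Matlab, so this is an illustration only.

import numpy as np

def estimate_err(simulate_pair, n_samples, max_level=7, rng=None):
    """Estimate Err(h) = E[(X - X_h)^2] for h = 2**(-n), n = 0..max_level.

    simulate_pair(h, rng) is assumed to return (x_ref, x_h): the reference
    discounted payoff (very fine step, e.g. 2**-10) and the step-h
    approximation driven by the same Brownian paths.
    """
    rng = rng or np.random.default_rng()
    errs = []
    for n in range(max_level + 1):
        h = 2.0 ** (-n)
        sq_diffs = []
        for _ in range(n_samples):
            x_ref, x_h = simulate_pair(h, rng)
            sq_diffs.append((x_ref - x_h) ** 2)
        errs.append(np.mean(sq_diffs))
    errs = np.array(errs)
    orders = np.log2(errs[:-1] / errs[1:])  # empirical L2 convergence order, ~2 expected
    return errs, orders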
Figure 1 plots $\log_2(\mathrm{Err}(h))$ against $-\log_2(h)$, with $h = 2^{-n}$, $n = 0, 1, 2, \dots, 7$. Here, the values of $S_T$ and $\int_0^T r_t\,dt$ are approximated using the Euler scheme in Section 2 based on a very small step size $2^{-10}$. For the BEM method, $(r_t)_{t\in[0,T]}$ needs an additional approximation, also with step size $2^{-10}$, which shares the same Brownian motion path with the corresponding $(\hat r_{ih})_{i=1,2,\dots}$. To estimate $\mathrm{Err}(h)$, the number of Monte Carlo samples for each $h$ in each model is at least 0.5 million. As illustrated in Figure 1, the convergence rate in all cases is two, which is consistent with the theoretical convergence rate.

Figure 1: Convergence rate for the Heston model with stochastic interest rates. The model parameters are from Table 1.

Next, we incorporate the log-Euler scheme into Rhee and Glynn's unbiased estimators. As discussed in Section 3, the implementation of the unbiased estimator $Z$ requires
setting a distribution of $N$. In this experiment, we simply take $P(N \ge n) = 2^{-3n/2}$, $n \in \mathbb{N}$, so that (2) and (3) are finite. Hence, $Z$ is unbiased with a finite variance and finite computational time. Table 2 reports the root mean square error (RMSE) and the computational time (in seconds) of $Z$ based on 1 million samples.

Table 2: The RMSE and computational time (in seconds) of Z based on 10^6 samples. The model parameters are from Table 1.

             RMSE            Computational time
CIR-exact    2.18 x 10^-4    8.34
CIR-BEM      1.49 x 10^-4    8.62
HW           2.45 x 10^-4    5.90
BK           1.58 x 10^-4    9.06

Note that for some applications, either the variance or the computational time of $Z$ can be infinite; see Zheng, Blanchet and Glynn [21]. We see from Table 2 that all of these quantities are finite, which again coincides with the theory. This suggests that the log-Euler scheme we develop is well suited to the framework of Rhee and Glynn's unbiased estimators. Furthermore, comparing with the results in Figure 1, we observe that a method for an interest rate model with a large RMSE of $Z$ tends to have a large $\mathrm{Err}(h)$. Thus, to make the RMSE small, we may prefer a method with a small error in the $L^2$ norm for the same model parameters.
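As a concrete illustration of how such an estimator can be assembled, here is a hedged Python sketch of the coupled-sum unbiased estimator of Rhee and Glynn [17] with the tail choice $P(N \ge n) = 2^{-3n/2}$ used above. The function `discounted_payoff_level` is a hypothetical stand-in for the log-Euler approximation at step size $h = 2^{-n}$, and coupling the levels through common Brownian paths is left to that function; this is a sketch under those assumptions, not the paper's implementation.

import numpy as np

def rhee_glynn_draw(discounted_payoff_level, rng=None, r=1.5):
    """One draw of the coupled-sum unbiased estimator Z with P(N >= n) = 2**(-r*n).

    discounted_payoff_level(n) should return the approximation Y_n computed with
    step size h = 2**(-n), all levels driven by the same underlying randomness.
    """
    rng = rng or np.random.default_rng()
    u = 1.0 - rng.uniform()                    # uniform in (0, 1]
    N = int(np.floor(-np.log2(u) / r))         # gives P(N >= n) = 2**(-r*n)
    z, y_prev = 0.0, 0.0
    for n in range(N + 1):
        y_n = discounted_payoff_level(n)
        z += (y_n - y_prev) / 2.0 ** (-r * n)  # weight the increment by 1 / P(N >= n)
        y_prev = y_n
    return z

Averaging independent draws of Z then gives an unbiased estimate of the discounted option price, with finite variance and finite expected work under the conditions discussed above.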
7 Conclusion

In this article, we develop a semi-exact log-Euler scheme for the Heston model with stochastic interest rates and analyse the relevant convergence rate in the $L^2$ norm. The SDEs of the Heston model with stochastic interest rates can be divided into two components: the Heston component and the interest rate component. Under mild assumptions on the interest rate component, but with no assumption on the Heston component, we show that the convergence rate is one, which allows us to easily incorporate the log-Euler scheme into Rhee and Glynn's unbiased estimators. Furthermore, we demonstrate that the log-Euler scheme and the convergence analysis apply to a large class of interest rate models.

There are two directions of extension that might be interesting. The log-Euler scheme we consider is based on the assumption that the driving Brownian motion $W^3$ for the interest rate model is independent of the driving Brownian motions $W^1$ and $W^2$ for the SDEs of $S$ and $V$. One direction is to extend the scheme to the case without this assumption, i.e., the full constant-correlation case, and analyse the convergence rate. This is nontrivial because the random variable $N$ and the stochastic process $r$ in equation (1) are then not independent. The other direction is to extend the payoff $P$ to more complicated cases, such as those in Cozma, Mariapragassam and Reisinger [6].

References

[1] Alfonsi, A. (2005). On the discretization schemes for the CIR (and Bessel squared) processes. Monte Carlo Methods and Applications, 11(4), 355-384.
[2] Bakshi, S., Cao, C. and Chen, Z. (2000). Pricing and hedging long-term options. Journal of Econometrics, 94, 2003-2049.

[3] Black, F. and Karasinski, P. (1991). Bond and option pricing when short rates are lognormal. Financial Analysts Journal, 52-59.

[4] Brigo, D. and Mercurio, F. (2006). Interest Rate Models - Theory and Practice: With Smile, Inflation and Credit. Springer Verlag.

[5] Cox, J., Ingersoll, J. and Ross, S. (1985). A theory of the term structure of interest rates. Econometrica, 53(2), 385-407.

[6] Cozma, A., Mariapragassam, M. and Reisinger, C. (2018). Convergence of an Euler scheme for a hybrid stochastic-local volatility model with stochastic rates in foreign exchange markets. SIAM Journal on Financial Mathematics, 9, 127-170.

[7] Dufresne, D. (2001). The integrated square-root process. Working Paper, University of Montreal. https://minerva-access.unimelb.edu.au/handle/11343/33693.
[8] Giles, M. (2008). Multilevel Monte Carlo path simulation. Operations Research, 56(3), 607-617.

[9] Glasserman, P. (2003). Monte Carlo Methods in Financial Engineering. Springer Science and Business Media, New York.

[10] Grzelak, L.A. and Oosterlee, C.W. (2011). On the Heston model with stochastic interest rates. SIAM Journal on Financial Mathematics, 2(1), 255-286.

[11] Heston, S. (1993). A closed-form solution for options with stochastic volatility with applications to bond and currency options. Review of Financial Studies, 6(2), 327-343.

[12] Hull, J.C. and White, A. (1990). Pricing interest rate derivative securities. Review of Financial Studies, 3(4), 573-592.
[13] Kloeden, P. and Platen, E. (1999). Numerical Solution of Stochastic Differential Equations, 3rd edition. Springer Verlag, New York.

[14] Mickel, A. and Neuenkirch, A. (2021). The weak convergence rate of two semi-exact discretization schemes for the Heston model. Risks, 9(1), 23.

[15] Neuenkirch, A. and Szpruch, L. (2014). First order strong approximations of scalar SDEs with values in a domain. Numerische Mathematik, 128, 103-136.

[16] Protter, P. (2005). Stochastic Integration and Differential Equations, 2nd edition. Springer.

[17] Rhee, C.-H. and Glynn, P.W. (2015). Unbiased estimation with square root convergence for SDE models. Operations Research, 63(5), 1026-1043.

[18] Van Haastrecht, A. and Pelsser, A. (2011). Generic pricing of FX, inflation and stock options under stochastic interest rates and stochastic volatility. Quantitative Finance, 11, 665-691.
[19] Zheng, C. (2017). Weak convergence rate of a time-discrete scheme for the Heston stochastic volatility model. SIAM Journal on Numerical Analysis, 55(3), 1243-1263.

[20] Zheng, C. (2020). Multilevel Monte Carlo simulation for the Heston stochastic volatility model. Preprint at SSRN: http://dx.doi.org/10.2139/ssrn.2804894.

[21] Zheng, Z., Blanchet, J. and Glynn, P. (2018). Rates of convergence and CLTs for subcanonical debiased MLMC. In: Owen, A., Glynn, P. (eds) Monte Carlo and Quasi-Monte Carlo Methods in Scientific Computing 2016.
diff --git a/i9FAT4oBgHgl3EQfaB2u/vector_store/index.faiss b/i9FAT4oBgHgl3EQfaB2u/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..343353f5d666ef613883865aa2bd38c6245d58d9 --- /dev/null +++ b/i9FAT4oBgHgl3EQfaB2u/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b118a83e5908ad9cd76f060b646ec84c98d2392d93682003681e4ec1a8ff685 +size 11665453 diff --git a/iNAzT4oBgHgl3EQfM_sr/vector_store/index.faiss b/iNAzT4oBgHgl3EQfM_sr/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..57cb7341b7e5b585fba2c913720734f0b6af39c6 --- /dev/null +++ b/iNAzT4oBgHgl3EQfM_sr/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc292d8025697d3293c71be517a171e5eaca804a4709f491d69dfce6e9ea70bd +size 4849709 diff --git a/iNAzT4oBgHgl3EQfM_sr/vector_store/index.pkl b/iNAzT4oBgHgl3EQfM_sr/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..72c8bd0668af451751d5076ccef21c43f8b9cd80 --- /dev/null +++ b/iNAzT4oBgHgl3EQfM_sr/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd8cf22ae5e43a6babee1a0f36e3560c3485f7eaec9981c153d235580a59ac4b +size 180137 diff --git a/idFIT4oBgHgl3EQfpStZ/content/tmp_files/2301.11322v1.pdf.txt b/idFIT4oBgHgl3EQfpStZ/content/tmp_files/2301.11322v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..ce2a811ff664aba3687f0bf1025ebd7e8c35acfa --- /dev/null +++ b/idFIT4oBgHgl3EQfpStZ/content/tmp_files/2301.11322v1.pdf.txt @@ -0,0 +1,570 @@

Semi-Automated Construction of Food Composition Knowledge Base
Jason Youn,*1,2,3 Fangzhou Li,*1,2,3 Ilias Tagkopoulos1,2,3
1 Department of Computer Science, University of California, Davis, CA 95616, USA.
2 Genome Center, University of California, Davis, CA 95616, USA.
3 USDA/NSF AI Institute for Next Generation Food Systems (AIFS), University of California, Davis, CA 95616, USA.
jyoun, fzli, itagkopoulos@ucdavis.edu

Abstract

A food composition knowledge base, which stores the essential phyto-, micro-, and macro-nutrients of foods, is useful for both research and industrial applications. Although many existing knowledge bases attempt to curate such information, they are often limited by time-consuming manual curation processes. Outside of the food science domain, natural language processing methods that utilize pre-trained language models have recently shown promising results for extracting knowledge from unstructured text. In this work, we propose a semi-automated framework for constructing a knowledge base of food composition from the scientific literature available online. To this end, we utilize a pre-trained BioBERT language model in an active learning setup that allows the optimal use of limited training data. Our work demonstrates how human-in-the-loop models are a step toward AI-assisted food systems that scale well to the ever-increasing big data.

Introduction

Constructing high-quality knowledge bases of foods is crucial for various needs like understanding the impact of dietary intake on human health and enabling personalized diet recommendations (Jacobs and Tapsell 2007; Young 2003).
There exist multiple knowledge bases that attempt to curate food composition information, such as FoodData Central (406,999 foods and ~300 key nutrients) (USDA 2019), FooDB (797 foods and 15,750 chemicals) (TMIC 2017), KNApSAcK (24,704 plant species and 62,647 metabolites) (Shinbo et al. 2006), and Phenol-Explorer (459 foods and 501 polyphenols) (Rothwell et al. 2013). In an effort to curate the relationship between chemicals and human health, knowledge bases like CTD (Davis et al. 2022) and KEGG (Kanehisa and Goto 2000) curate information about the interactions among chemicals, genes, and/or disease entities, while other resources, including ChemFont (Wishart et al. 2022) and GO (Ashburner et al. 2000), are dedicated to creating an ontology of chemicals. Yet, existing approaches to creating and expanding these knowledge bases are often bottlenecked by the need for time-consuming manual annotation processes that often require the expertise of domain experts.

*These authors contributed equally.
Copyright © 2023, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.

Figure 1: Overarching pipeline for semi-automated construction of a food knowledge base from online publications using language models with active learning. From the sentences in the literature that mention both foods (red) and chemicals (green), we extract relations where food and chemical entities are connected by the contains relation. We then use active learning with a language model to partially annotate and train over N = 10 rounds.

While knowledge bases have traditionally been curated manually from text data (Kotova and Pisarev 2019), recent approaches utilize deep learning-based state-of-the-art relation extraction (RE) models for constructing knowledge bases (Jiang et al. 2020). RE is a task in natural language processing (NLP) that extracts semantic relations between entities in natural language sentences (Bach and Badaskar 2007) (e.g., given the sentence 'Joe Biden is the president of the United States', an RE model extracts a relation 'isPresidentOf' between the entities 'Joe Biden' and 'United States'). However, these deep learning-based approaches often require many labeled training data (LeCun, Bengio, and Hinton 2015), which is often not feasible in fields like food science where the data annotation procedure is expensive. To address this issue, active learning methods, which use the model to choose the data that can most efficiently improve its performance and therefore reduce the amount of data needed for achieving the desired training outcome (Settles 2009), have been widely used in various fields such as natural language processing (Shen et al. 2004; Longpre et al. 2022; Rotman and Reichart 2022), computer vision (Coleman et al. 2022; Chen et al. 2022; Yu et al. 2021), and studies in biology (Wang et al. 2020). Furthermore, recent works suggest that semi-automatic models are desirable for tasks related to knowledge bases, as machines, though faster than human annotators, often yield low recall (Wang, Guo, and Chen 2022; Zhuang et al.
2017).

In this work, we propose a semi-automated framework for constructing a knowledge base of food composition information using active learning of language models (Figure 1). Training and evaluating 100 runs of the proposed active learning strategy, each with a unique random seed, shows that language models are able to extract correct relations from the sentence with high confidence (precision = 0.92 ± 0.04, recall = 0.82 ± 0.07, and F1 = 0.87 ± 0.04). We also found that using the proposed active learning sampling strategy accelerates new knowledge discovery by 21.0% ± 0.05%. All code and instructions on how to reproduce the results can be found at https://github.com/ibpa/SemiAutomatedFoodKBC.

Proposed Method

Data Generation. We downloaded 1,226 food names (both commonly used and scientific) from FooDB (TMIC 2017) and used them to query LitSense (Allot et al. 2019), a sentence-level search system provided by the National Center for Biotechnology Information (NCBI) for biomedical literature from PubMed and PubMed Central (PMC). We used the search query template 'food name contains', as we empirically found that the returned sentences had the most food-chemical relations. In addition to the food and chemical entities already tagged and returned by LitSense for each sentence, we manually generated a list of 70 common food parts and used a rule-based named entity recognition (NER) approach to strictly match the food part entities. Note that since LitSense returns all species in the NCBI taxonomy even if they are not food, we dropped non-food species by keeping only the species in the 1,226 FooDB food names. Therefore, for each sentence s ∈ S returned by LitSense, three sets of entities F, P, and C for foods, food parts, and chemicals, respectively, were recognized. Finally, we extracted a set of relations R for s as

R = {template(f, p, c) | ∀(f, p, c) ∈ F × P × C} ∪ {template(f, c) | ∀(f, c) ∈ F × C},    (1)

where template(·) is a function that takes entities as input, with or without a food part entity, and outputs a contains relation between the entities. For example, template(apple, vitamin A) = apple contains vitamin A, and template(apple, skin, vitamin A) = apple skin contains vitamin A when there is a food part entity. This process resulted in 85,839 sentence-relation (SR) pairs with 21,313 unique sentences.

Data Annotation. We randomly selected train, validation, and test datasets from these SR pairs, while making sure there were no duplicate sentences or relations across the datasets that could cause bias during training and evaluation (Table 1).

Table 1: Statistics of the dataset used for training and evaluation. Note that the training data of 1,000 sentence-relation (SR) pairs are distributed evenly across N=10 rounds of active learning, while the validation and test sets are held out for consistency.

                          train    val    test
# of SR pairs             1,000    300    300
# of positive SR pairs      453    116    129
# of negative SR pairs      547    184    171
# of unique sentences       747    174    157
# of unique relations     1,000    300    300
# of entities               537    175    169
# of food entities          288     95     86
# of chemical entities      249     80     83

During the manual annotation process, two annotators were asked to assign three possible classes: positive, negative, and skip. The positive class was assigned when there was enough evidence in the sentence to support that the paired relation was true. Otherwise, the negative class was assigned.
We assigned the class skip if the NER by LitSense was not performed correctly, and we eventually discarded the skipped SR pairs. To ensure that the annotation quality was high, we kept only the positive and negative SR pairs whose annotation results were in consensus between the two annotators.

Language Model. We used the BioBERT (Lee et al. 2020) language model, which is based on the original BERT (Devlin et al. 2018) model but trained using the biomedical domain corpora PubMed and PMC, sharing a similar domain with the text returned by LitSense (Allot et al. 2019). We formatted the input to the model by concatenating the sentence and relation strings separated by the [SEP] token. We fine-tuned BioBERT using the binary classification scheme, with a grid search over 8 possible hyperparameter combinations (learning rate = {2 × 10^-5, 5 × 10^-5}, batch size = {16, 32}, and epochs = {3, 4}) using the validation set. Finally, the hyperparameter set with the highest precision score was selected for the active learning step discussed in the next section.

Active Learning. We used the pool-based active learning strategy as defined by Settles (2009), where we split the total training pool of 1,000 annotated SR pairs (Table 1) equally into 10 active learning rounds. For the first round, we randomly sampled 100 training SR pairs from the pool. We trained the language models using these 100 SR pairs and then predicted the probability of being a positive class, denoted as p, for the remaining 900 SR pairs. From round 2, we selected the data using the uncertainty sampling scheme, where we sampled the SR pairs closest to the model decision boundary. The uncertainty score for each SR pair was calculated as min(1 − p, p), and the uncertainty scheme then sampled the 100 SR pairs with the highest uncertainty scores. These newly sampled data were added to the previous training data and used to train the next round.
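The uncertainty-driven selection step described above can be written compactly. The following Python sketch shows one way to score and pick the next batch of SR pairs, assuming `probs` holds the model's positive-class probabilities for the currently unlabeled pool; the function name and array layout are illustrative and are not taken from the released code.

import numpy as np

def select_uncertain(probs, batch_size=100):
    """Pick the SR pairs closest to the decision boundary.

    probs: 1-D array of predicted positive-class probabilities p for the
    unlabeled pool; the uncertainty score is min(1 - p, p), highest first.
    """
    probs = np.asarray(probs)
    uncertainty = np.minimum(1.0 - probs, probs)
    # indices of the batch_size most uncertain pairs (p closest to 0.5)
    return np.argsort(-uncertainty)[:batch_size]

The selected indices are then moved from the unlabeled pool into the training set before the next round of fine-tuning; random sampling simply replaces the scoring step with a uniform draw.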
Gray lines indicate the curves for 100 runs, and the +black lines are plotted by concatenating the model probabil- +ities of the test data from these 100 runs. The baseline refers +to a classifier that always predicts the minority class (posi- +tive). +We repeated this process until round 10 when all training +data were sampled and consumed by the model. We refer to +this whole process as a single run of active learning, and we +repeated this active learning run 100 times to provide statis- +tical significance for our results. +Experiments and Results +Settings. We used 2 × Nvidia RTX A5000 GPUs for the +training and inference of the BioBERT language models, +one GPU for each of the active learning strategies, (i.e., un- +certainty sampling and random sampling.) The entire proce- +dure (100 runs × 10 rounds for each active learning strat- +egy) took approximately 90 hours, where around 85% of the +time was used for model training and the rest for data sam- +pling and evaluation. The model pipeline was implemented +with PyTorch (Paszke et al. 2019) and HuggingFace’s trans- +former library (Wolf et al. 2019). +Active Learning Results. The performance metrics ob- +tained from 100 different runs of the active learning each +with 10 rounds and different random seeds are shown in Fig- +ure 2. Compared to the first round, precision at the final 10th +round increased by 6.2% (0.87 ± 0.09 vs. 0.92 ± 0.04, re- +spectively), recall by 62.0% (0.51±0.29 vs. 0.82±0.07, re- +spectively), and F1 by 50.9% (0.57±0.28 vs. 0.87±0.04, re- +spectively). However, compared to the random sampling ac- +tive learning strategy which chooses the samples to train on +for the subsequent round randomly (gray line and box plots +in Figure 2), we did not observe any statistical difference be- +tween the performance metrics (p-value > 0.05) except for +precision at round 2 (0.88 ± 0.05 vs. 0.86 ± 0.05, respec- +tively, p-value = 3.4 × 10-2), recall at round 5 (0.79 ± 0.09 +vs. 0.76 ± 0.12, respectively, p-value = 2.9 × 10-2), and +F1 at round 5 (0.84 ± 0.05 vs. 0.81 ± 0.07, respectively, p- +value = 6.8 × 10-3). The performance metrics start to show +an insignificant difference in their average values compared +to the final round at round 8 for precision (0.91 ± 0.04 vs. +0.92 ± 0.04, respectively, p-value = 1.6 × 10-1), round 7 +for recall (0.81 ± 0.07 vs. 0.82 ± 0.07, respectively, p-value += 2.5×10-1), and round 9 for F1(0.86±0.04 vs. 0.87±0.04, +respectively, p-value = 3.2 × 10-1). Note that all p-values +were calculated using the two-sided t-test. The models at the +final round that was trained using the complete training data +have AUCPR = 0.94 and AUROC = 0.95 as shown in Fig- +ure 2. +Although both the random sampling strategy and the un- +certainty active learning strategy discover the same set of +453 positives in the training pool with 1,000 SR pairs (Table +1) at the final round, the uncertainty active learning strategy +discovers positive data 21.0%±0.05% faster on average dur- +ing the intermediate rounds (rounds 2 through 9) compared +to the random sampling strategy that discovers new knowl- +edge in each round of training linearly (Figure 3). 
+Conclusion +In this work, we presented a semi-automated framework +for constructing a food composition knowledge base from + +Active Learning (Uncertain) +Random +100 +200 +300 +400 +9 +8 +7 +6 +5 +4 +3 +2 +1 +10 +Round +# of new positives +22.1% +24.1% +24.3% +24.5% +24.3% +22.4% +16.6%9.5% +Figure 3: Rate of discovery of new knowledge within the +training data compared between the uncertainty active learn- +ing sampling strategy and the random sampling strategy. +Percentage value denotes the increase of relative abundance +of positive data for each round from random to uncertainty +active learning method. +scientific literature using pre-trained language models sup- +ported by active learning. Although the active learning sam- +pling strategy that selects the uncertain samples around the +probability of 0.5 has not shown statistically significant pre- +dictive performance improvement over the random sampling +approach, we found that the uncertainty sampling approach +was able to find new positive data faster, therefore leading +to the creation of knowledge base in an accelerated manner +than the random sampling. In future work, we plan to test ad- +ditional sampling strategies like disproportionate stratified +sampling and one that only samples data with the highest +probability scores. We also plan to train the model with a +bigger dataset, create a knowledge graph of food-chemical +information enriched with ontological relationships like tax- +onomy and chemical classification, and perform link predic- +tion (Yao, Mao, and Luo 2019; Youn and Tagkopoulos 2022) +on the knowledge graph to discover novel food-chemical re- +lations. +References +Allot, A.; Chen, Q.; Kim, S.; Vera Alvarez, R.; Comeau, +D. C.; Wilbur, W. J.; and Lu, Z. 2019. LitSense: making +sense of biomedical literature at sentence level. +Nucleic +acids research, 47(W1): W594–W599. +Ashburner, M.; Ball, C. A.; Blake, J. A.; Botstein, D.; But- +ler, H.; Cherry, J. M.; Davis, A. P.; Dolinski, K.; Dwight, +S. S.; Eppig, J. T.; et al. 2000. Gene ontology: tool for the +unification of biology. Nature genetics, 25(1): 25–29. +Bach, N.; and Badaskar, S. 2007. A review of relation ex- +traction. Literature review for Language and Statistics II, 2: +1–15. +Chen, Z.; Zhang, J.; Wang, P.; Chen, J.; and Li, J. 2022. +When Active Learning Meets Implicit Semantic Data Aug- +mentation. In European Conference on Computer Vision, +56–72. Springer. +Coleman, C.; Chou, E.; Katz-Samuels, J.; Culatana, S.; +Bailis, P.; Berg, A. C.; Nowak, R.; Sumbaly, R.; Zaharia, M.; +and Yalniz, I. Z. 2022. Similarity Search for Efficient Active +Learning and Search of Rare Concepts. Proceedings of the +AAAI Conference on Artificial Intelligence, 36: 6402–6410. +Davis, A. P.; Wiegers, T. C.; Johnson, R. J.; Sciaky, D.; +Wiegers, J.; and Mattingly, C. 2022. +Comparative Toxi- +cogenomics Database (CTD): update 2023. Nucleic acids +research. +Devlin, J.; Chang, M.-W.; Lee, K.; and Toutanova, K. 2018. +Bert: Pre-training of deep bidirectional transformers for lan- +guage understanding. arXiv preprint arXiv:1810.04805. +Jacobs, D. R.; and Tapsell, L. C. 2007. Food, Not Nutrients, +Is the Fundamental Unit in Nutrition. Nutrition Reviews, 65: +439–450. +Jiang, H.; Bao, Q.; Cheng, Q.; Yang, D.; Wang, L.; and Xiao, +Y. 2020. Complex relation extraction: Challenges and op- +portunities. arXiv preprint arXiv:2012.04821. +Kanehisa, M.; and Goto, S. 2000. KEGG: kyoto encyclo- +pedia of genes and genomes. Nucleic acids research, 28(1): +27–30. +Kotova, E. 
Kotova, E.; and Pisarev, I. A. 2019. Automated Creation of Knowledge Bases for Intelligent Systems, taking into account Linguistic Uncertainty. In 2019 XXII International Conference on Soft Computing and Measurements (SCM), 149–152. IEEE.

LeCun, Y.; Bengio, Y.; and Hinton, G. 2015. Deep learning. Nature, 521(7553): 436–444.

Lee, J.; Yoon, W.; Kim, S.; Kim, D.; Kim, S.; So, C. H.; and Kang, J. 2020. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics, 36(4): 1234–1240.

Longpre, S.; Reisler, J.; Huang, E. G.; Lu, Y.; Frank, A.; Ramesh, N.; and DuBois, C. 2022. Active Learning Over Multiple Domains in Natural Language Tasks. arXiv preprint arXiv:2202.00254.

Paszke, A.; Gross, S.; Massa, F.; Lerer, A.; Bradbury, J.; Chanan, G.; Killeen, T.; Lin, Z.; Gimelshein, N.; Antiga, L.; et al. 2019. PyTorch: An imperative style, high-performance deep learning library. Advances in Neural Information Processing Systems, 32.

Rothwell, J. A.; Perez-Jimenez, J.; Neveu, V.; Medina-Remon, A.; M'hiri, N.; García-Lobato, P.; Manach, C.; Knox, C.; Eisner, R.; Wishart, D. S.; et al. 2013. Phenol-Explorer 3.0: a major update of the Phenol-Explorer database to incorporate data on the effects of food processing on polyphenol content. Database, 2013.

Rotman, G.; and Reichart, R. 2022. Multi-task Active Learning for Pre-trained Transformer-based Models. arXiv preprint arXiv:2208.05379.

Settles, B. 2009. Active Learning Literature Survey. Technical report, University of Wisconsin-Madison Department of Computer Sciences.

Shen, D.; Zhang, J.; Su, J.; Zhou, G.; and Tan, C. L. 2004. Multi-criteria-based active learning for named entity recognition. In Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL-04), 589–596.

Shinbo, Y.; Nakamura, Y.; Altaf-Ul-Amin, M.; Asahi, H.; Kurokawa, K.; Arita, M.; Saito, K.; Ohta, D.; Shibata, D.; and Kanaya, S. 2006. KNApSAcK: a comprehensive species-metabolite relationship database. In Plant Metabolomics, 165–181. Springer.

TMIC. 2017. The Metabolomics Innovation Centre. FooDB Version 1.0. https://foodb.ca.

USDA. 2019. U.S. Department of Agriculture, Agricultural Research Service. FoodData Central. https://fdc.nal.usda.gov.

Wang, J.; Guo, B.; and Chen, L. 2022. Human-in-the-loop Machine Learning: A Macro-Micro Perspective. arXiv preprint arXiv:2202.10564.

Wang, X.; Rai, N.; Pereira, B. M. P.; Eetemadi, A.; and Tagkopoulos, I. 2020. Accelerated knowledge discovery from omics data by optimal experimental design. Nature Communications, 11.

Wishart, D. S.; Girod, S.; Peters, H.; Oler, E.; Jovel, J.; Budinski, Z.; Milford, R.; Lui, V. W.; Sayeeda, Z.; Mah, R.; et al. 2022. ChemFOnt: the chemical functional ontology resource. Nucleic Acids Research.

Wolf, T.; Debut, L.; Sanh, V.; Chaumond, J.; Delangue, C.; Moi, A.; Cistac, P.; Rault, T.; Louf, R.; Funtowicz, M.; et al. 2019. HuggingFace's Transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771.

Yao, L.; Mao, C.; and Luo, Y. 2019. KG-BERT: BERT for knowledge graph completion. arXiv preprint arXiv:1909.03193.

Youn, J.; and Tagkopoulos, I. 2022. KGLM: Integrating Knowledge Graph Structure in Language Models for Link Prediction. arXiv preprint arXiv:2211.02744.
Young, V. R. 2003. Trace Element Biology: The Knowledge Base and its Application for the Nutrition of Individuals and Populations. The Journal of Nutrition, 133: 1581S–1587S.

Yu, W.; Zhu, S.; Yang, T.; and Chen, C. 2021. Consistency-based Active Learning for Object Detection. IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops, 2022-June: 3950–3959.

Zhuang, Y.; Li, G.; Zhong, Z.; and Feng, J. 2017. Hike: A hybrid human-machine method for entity alignment in large-scale knowledge bases. In Proceedings of the 2017 ACM on Conference on Information and Knowledge Management, 1917–1926.

Acknowledgments

We would like to thank the members of the Tagkopoulos lab for their suggestions, and Gabriel Simmons for the initial discussions. This work was supported by the USDA-NIFA AI Institute for Next Generation Food Systems (AIFS), USDA-NIFA award number 2020-67021-32855, and the NIEHS grant P42ES004699 to I.T. All computational analyses were performed and the figures were generated by J.Y. and F.L.; J.Y., F.L., and I.T. contributed to the critical analysis and wrote the paper. I.T. supervised all aspects of the project.
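For illustration, the uncertainty sampling rule referred to in the conclusion, which selects the samples whose predicted probability of the positive class is closest to 0.5, could be sketched as follows. This is a minimal, hypothetical example rather than the released implementation; the batch size and the probability values are placeholders.

# Sketch: pick the most uncertain unlabeled samples for the next annotation round.
import numpy as np

def select_most_uncertain(positive_probs, batch_size=100):
    """Return indices of the batch_size samples whose probability is closest to 0.5."""
    uncertainty = np.minimum(positive_probs, 1.0 - positive_probs)  # min(p, 1 - p)
    return np.argsort(-uncertainty)[:batch_size]  # most uncertain first

# Example with stand-in model outputs for a small unlabeled pool:
probs = np.array([0.03, 0.12, 0.28, 0.54, 0.80, 0.95])
print(select_most_uncertain(probs, batch_size=2))  # -> [3 2], i.e., 0.54 and 0.28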
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' To this end, we utilize a pre-trained BioBERT language model in an active learning setup that allows the optimal use of limited training data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Our work demonstrates how human-in-the-loop models are a step toward AI-assisted food systems that scale well to the ever-increasing big data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Introduction Constructing high-quality knowledge bases of foods is cru- cial for various needs like understanding the impact of di- etary intake on human health and enabling personalized diet recommendations (Jacobs and Tapsell 2007;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Young 2003).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' There exist multiple knowledge bases that attempt to cu- rate food composition information such as FoodData Cen- tral (406,999 foods and ∼300 key nutrients) (USDA 2019), FooDB (797 foods and 15,750 chemicals) (TMIC 2017), KNApSAcK (24,704 plant species and 62,647 metabolites) (Shinbo et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2006), and Phenol-Explorer (459 foods and 501 polyphenols) (Rothwell et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' In an effort to cu- rate the relationship between chemicals and human health, knowledge bases like CTD (Davis et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2022) and KEGG (Kanehisa and Goto 2000) curate information about the in- teractions among chemicals, genes, and/or disease entities, while other resources, including ChemFont (Wishart et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2022) and GO (Ashburner et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2000), are dedicated to cre- ating an ontology of chemicals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Yet, existing approaches to creating and expanding these knowledge bases are often bot- tlenecked by the need for time-consuming manual annota- tion processes that often require the expertise of domain ex- perts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' These authors contributed equally.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Copyright © 2023, Association for the Advancement of Artificial Intelligence (www.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='aaai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='org).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' All rights reserved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='28 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='95 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='80 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='54 Annotate Partially Language Model Train Generate Data Predict Sample .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='54 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='28 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='80 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='95 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 Not sampled Sampled Unannotated Annotated positive Annotated negative Predictions with probability score DB Publications about food ( , ) Sentence Relation Repeat x N Figure 1: Overarching pipeline for semi-automated con- struction of a food knowledge base from online publica- tions using language models with active learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' From the sentences in the literature that mention both foods (red) and chemicals (green), we extract relations where food and chemical entities are connected by the contains relation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We then use active learning with a language model to partially annotate and train over N = 10 rounds.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' While knowledge bases have traditionally been curated manually from text data (Kotova and Pisarev 2019), recent approaches utilize deep learning-based state-of-the-art re- lation extraction (RE) models for constructing knowledge bases (Jiang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' RE is a task in natural language processing (NLP) that extracts semantic relations between entities in natural language sentences (Bach and Badaskar 2007) (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=', given a sentence ‘Joe Biden is the president of United States’, an RE model extracts a relation of ‘is- PresidentOf’ between the entities ‘Joe Biden’ and ‘United States’).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' However, these deep learning-based approaches of- ten require many labeled training data (LeCun, Bengio, and Hinton 2015), which is often not feasible in fields like food science where the data annotation procedure is expensive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' To address such issue, active learning methods, which use the model to choose the data that can most efficiently im- prove its performance and therefore reduce the amount of data needed for achieving the desired training outcome (Set- arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='11322v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='CL] 24 Jan 2023 tles 2009), have been widely used in various fields such as natural language processing (Shen et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2004;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Longpre et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Rotman and Reichart 2022), computer vision (Cole- man et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Chen et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Yu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2021), and stud- ies in biology (Wang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Furthermore, recent works suggest that semi-automatic models are desirable for tasks related to knowledge bases as machines, though faster than human annotators, often yield low recall (Wang, Guo, and Chen 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Zhuang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' In this work, we propose a semi-automated framework for constructing a knowledge base of food composition in- formation using active learning of language models (Figure 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Training and evaluating 100 runs of the proposed active learning strategy each with a unique random seed shows that language models are able to extract correct relations from the sentence with high confidence (precision = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='92 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04, recall = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='82 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='07, and F1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='87 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We also found that using the proposed active learning sampling strategy ac- celerates new knowledge discovery by 21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='0% ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='05%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' All code and instructions on how to reproduce the results can be found in https://github.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='com/ibpa/SemiAutomatedFoodKBC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Proposed Method Data Generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We downloaded 1,226 food names (both commonly used and scientific) from FooDB (TMIC 2017) and used them to query LitSense (Allot et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2019), a sentence-level search system provided by National Center for Biotechnology Information (NCBI) for biomedical liter- ature from PubMed and PubMed Central (PMC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We used the search query template ‘food name contains’ as we em- pirically found that the returned sentences had the most food-chemical relations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' In addition to the food and chemi- cal entities already tagged and returned by LitSense for each sentence, we manually generated a list of 70 common food parts and used a rule-based named entity recognition (NER) approach to strictly match the food part entities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Note that since LitSense returns all species in the NCBI taxonomy even if they are not food, we dropped non-food species by keeping only the species in the 1,226 FooDB food names.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Therefore, for each sentence s ∈ S returned by LitSense, three sets of entities F, P, and C for foods, food parts, and chemicals, respectively, were recognized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Finally, we extracted a set of relations R for s as R = {template(f, p, c) | ∀(f, p, c) ∈ F × P × C} ∪{template(f, c) | ∀(f, c) ∈ F × C}, (1) where template(·) is a function that takes as input enti- ties, with or without food part entity, and outputs a contains relation between entities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' For example, template(apple, vi- tamin A) = apple contains vitamin A and template(apple, skin, vitamin A) = apple skin contains vitamin A, if there is a food part entity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' This process resulted in 85,839 sentence- relation (SR) pairs with 21,313 unique sentences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Data Annotation.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We randomly selected train, valida- tion, and test datasets from these SR pairs, while making sure there were no duplicate sentences or relations across the datasets that could cause bias during training and evaluating Table 1: Statistics of the dataset used for training and evaluation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Note that the training data of 1,000 sentence- relation(SR) pairs are distributed evenly across N=10 rounds of active learning, while the validation and test set is held- out for consistency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' train val test # of SR pairs 1,000 300 300 # of positive SR pairs 453 116 129 # of negative SR pairs 547 184 171 # of unique sentences 747 174 157 # of unique relations 1,000 300 300 # of entities 537 175 169 # of food entities 288 95 86 # of chemical entities 249 80 83 (Table 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' During the manual annotation process, two anno- tators were asked to assign three possible classes positive, negative, and skip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' The positive class was assigned when there was enough evidence in the sentence to support that the paired relation was true.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Otherwise, the negative class was assigned.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We assigned the class skip if the NER by LitSense was not performed correctly, where we eventually discarded the skipped SR pairs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' To ensure that the annotation quality was high, we kept only the positive and negative SR pairs whose annotation results were consensus between the two annotators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Language Model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We used the BioBERT (Lee et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2020) language model that is based on the original BERT (Devlin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2018) model but trained using biomedical do- main corpora PubMed and PMC, sharing a similar domain of the text returned by LitSense (Allot et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2019).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We for- matted input to the model by concatenating the sentence and relation strings separated by the [SEP] token.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We fine- tuned BioBERT using the binary classification scheme, with the grid search of 8 possible hyperparameter combinations (learning rate = {2 × 10-5, 5 × 10-5}, batch size = {16, 32}, and epochs = {3, 4}) using the validation set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Finally, the hyperparameter set with the highest precision score was se- lected for the active learning step that we will discuss in the next section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Active Learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We used the pool-based active learn- ing strategy as defined by Settles (Settles 2009), where we split the total training pool of 1,000 annotated SR pairs (Ta- ble 1) equally into 10 active learning rounds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' For the first round, we randomly sampled 100 training SR pairs from the pool.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' We trained the language models using these 100 SR pairs and then predicted the probability of being a positive class, denoted as p, for the remaining 900 SR pairs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' From round 2, we selected the data using the uncertainty sam- pling scheme, where we sampled the SR pairs closest to the model decision boundary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' The uncertainty score for each SR pair was calculated as min(1 − p, p), and then the uncer- tainty scheme would sample 100 SR pairs with the highest uncertainty scores.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' These newly sampled data were added to the previous training data and used to train the next rounds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Table 2: Performance metrics for 100 runs of active learning (uncertainty sampling) compared to the random sampling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Rounds Metric 1 2 3 4 5 6 7 8 9 10 Active Learning (Uncertain) Precision .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='87±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='09 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='86±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='87±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='88±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='89±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='89±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='91±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='91±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='92±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='92±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 Recall .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='51±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='29 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='72±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='13 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='76±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='78±.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='76±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='78±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='81±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='80±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='81±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='82±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='07 F1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='57±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='28 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='77±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='81±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='82±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='01 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='81±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='83±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='06 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='85±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='85±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='86±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='87±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 Accuracy .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='75±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='83±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='85±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='86±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='85±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='86±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='88±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='88±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='89±.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='89±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 Specificity .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='93±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='91±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='91±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='92±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='93±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='93±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='94±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='94±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='95±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='94±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03 Random Precision .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='88±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='07 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='88±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='88±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='88±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='90±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='90±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='91±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='91±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='92±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='92±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 Recall .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='49±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='28 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='70±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='17 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='76±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='10 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='78±.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='11 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='79±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='09 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='79±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='81±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='82±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='08 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='81±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='07 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='82±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='06 F1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='56±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='27 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='77±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='12 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='81±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='06 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='82±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='06 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='84±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='05 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='84±.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='04 .' 
[Table fragment (rows of per-round results, values appear to be mean ± s.d.; the row preceding "Accuracy" is cut off, with only its last four values recoverable: .86±.04, .86±.04, .86±.04, .87±.04)]
Accuracy     .74±.10  .83±.06  .85±.03  .86±.03  .87±.03  .87±.03  .88±.03  .89±.03  .89±.03  .89±.03
Specificity  .93±.06  .92±.05  .92±.04  .92±.04  .93±.03  .93±.03  .94±.03  .94±.03  .95±.03  .95±.03
[Figure 2: two panels, precision-recall (Recall vs. Precision) and ROC (False Positive Rate vs. True Positive Rate); the plotted curves are not recoverable from the extracted text. Annotations: AUCPR: 0.94, AUCPR (baseline): 0.43, AUROC: 0.95, AUROC (baseline): 0.50.]

Figure 2: Precision-recall (left) and receiver operating characteristic (right) curves of the models at round 10 with active learning. Gray lines indicate the curves for 100 runs, and the black lines are plotted by concatenating the model probabilities of the test data from these 100 runs. The baseline refers to a classifier that always predicts the minority class (positive).

We repeated this process until round 10, when all training data had been sampled and consumed by the model. We refer to this whole process as a single run of active learning, and we repeated this active learning run 100 times to provide statistical significance for our results.

Experiments and Results

Settings. We used 2 × Nvidia RTX A5000 GPUs for the training and inference of the BioBERT language models, one GPU for each of the active learning strategies (i.e., uncertainty sampling and random sampling). The entire procedure (100 runs × 10 rounds for each active learning strategy) took approximately 90 hours, of which around 85% was spent on model training and the rest on data sampling and evaluation. The model pipeline was implemented with PyTorch (Paszke et al. 2019) and HuggingFace's transformers library (Wolf et al. 2019).
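To make the sampling step concrete, here is a minimal sketch (the helper name select_batch, the NumPy array of positive-class probabilities, and the pool/batch sizes are illustrative assumptions, not taken from the released pipeline): uncertainty sampling picks the pool items whose predicted probability is closest to 0.5, while random sampling draws the same number of items uniformly.

    import numpy as np

    def select_batch(probs, batch_size, strategy, rng):
        # probs: positive-class probabilities for the unlabeled pool (shape: [pool_size])
        if strategy == "uncertainty":
            # Most uncertain first: smallest distance from the decision boundary at 0.5.
            return np.argsort(np.abs(probs - 0.5))[:batch_size]
        if strategy == "random":
            return rng.choice(len(probs), size=batch_size, replace=False)
        raise ValueError(f"unknown strategy: {strategy}")

    rng = np.random.default_rng(0)
    pool_probs = rng.uniform(size=1000)            # stand-in for BioBERT predictions
    to_label = select_batch(pool_probs, 100, "uncertainty", rng)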
Active Learning Results. The performance metrics obtained from 100 different runs of active learning, each with 10 rounds and a different random seed, are shown in Figure 2. Compared to the first round, precision at the final (10th) round increased by 6.2% (0.87 ± 0.09 vs. 0.92 ± 0.04), recall by 62.0% (0.51 ± 0.29 vs. 0.82 ± 0.07), and F1 by 50.9% (0.57 ± 0.28 vs. 0.87 ± 0.04). However, compared to the random sampling active learning strategy, which chooses the samples to train on for the subsequent round randomly (gray line and box plots in Figure 2), we did not observe any statistical difference between the performance metrics (p-value > 0.05), except for precision at round 2 (0.88 ± 0.05 vs. 0.86 ± 0.05, p-value = 3.4 × 10⁻²), recall at round 5 (0.79 ± 0.09 vs. 0.76 ± 0.12, p-value = 2.9 × 10⁻²), and F1 at round 5 (0.84 ± 0.05 vs. 0.81 ± 0.07, p-value = 6.8 × 10⁻³). The performance metrics start to show an insignificant difference in their average values compared to the final round at round 8 for precision (0.91 ± 0.04 vs. 0.92 ± 0.04, p-value = 1.6 × 10⁻¹), round 7 for recall (0.81 ± 0.07 vs. 0.82 ± 0.07, p-value = 2.5 × 10⁻¹), and round 9 for F1 (0.86 ± 0.04 vs. 0.87 ± 0.04, p-value = 3.2 × 10⁻¹). Note that all p-values were calculated using a two-sided t-test.
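As a reference for how such a comparison can be reproduced, the sketch below runs an unpaired two-sided t-test over the per-run scores of the two strategies with SciPy; the arrays are synthetic placeholders, not the study's measurements.

    import numpy as np
    from scipy.stats import ttest_ind

    rng = np.random.default_rng(1)
    # Synthetic per-run precision scores for 100 runs of each strategy (placeholders only).
    uncertainty_runs = rng.normal(loc=0.88, scale=0.05, size=100)
    random_runs = rng.normal(loc=0.86, scale=0.05, size=100)

    t_stat, p_value = ttest_ind(uncertainty_runs, random_runs)  # two-sided by default
    print(f"t = {t_stat:.2f}, p = {p_value:.3g}")               # significant if p < 0.05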
The models at the final round, trained on the complete training data, have AUCPR = 0.94 and AUROC = 0.95, as shown in Figure 2. Although both the random sampling strategy and the uncertainty active learning strategy discover the same set of 453 positives in the training pool of 1,000 SR pairs (Table 1) at the final round, the uncertainty active learning strategy discovers positive data 21.0% ± 0.05% faster on average during the intermediate rounds (rounds 2 through 9) than the random sampling strategy, which discovers new knowledge linearly in each round of training (Figure 3).

[Figure 3: plot comparing strategies labeled "Active Learning (Uncertain)" and "Random"; axes "Round" (1-10) and "# of new positives" (roughly 100-400); per-round gain annotations: 22.1%, 24.1%, 24.3%, 24.5%, 24.3%, 22.4%, 16.6%, 9.5%; plot area not recoverable from the extracted text.]

Figure 3: Rate of discovery of new knowledge within the training data, compared between the uncertainty active learning sampling strategy and the random sampling strategy. Percentage values denote the increase in relative abundance of positive data for each round from the random to the uncertainty active learning method.
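The Figure 3 comparison can be tallied with a short helper like the one below (a sketch only; the function name, array shapes, and toy pool are illustrative assumptions, not from the released code): for each round, count how many of the newly annotated items are positive and accumulate the counts.

    import numpy as np

    def cumulative_positives(sampled_per_round, labels):
        # sampled_per_round: list of index arrays, one per round, of newly annotated pool items
        # labels: binary ground-truth labels for the full training pool
        new_positives = [int(labels[idx].sum()) for idx in sampled_per_round]
        return np.cumsum(new_positives)

    # Toy pool of 1,000 items (~453 positives, as in the training pool) split into 10 rounds.
    rng = np.random.default_rng(2)
    labels = (rng.uniform(size=1000) < 0.453).astype(int)
    rounds = np.array_split(rng.permutation(1000), 10)
    print(cumulative_positives(rounds, labels))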
Conclusion

In this work, we presented a semi-automated framework for constructing a food composition knowledge base from scientific literature using pre-trained language models supported by active learning. Although the active learning sampling strategy that selects uncertain samples around a probability of 0.5 did not show a statistically significant improvement in predictive performance over the random sampling approach, we found that the uncertainty sampling approach was able to find new positive data faster, thereby accelerating the creation of the knowledge base compared with random sampling. In future work, we plan to test additional sampling strategies, such as disproportionate stratified sampling and one that only samples the data with the highest probability scores. We also plan to train the model with a bigger dataset, create a knowledge graph of food-chemical information enriched with ontological relationships such as taxonomy and chemical classification, and perform link prediction (Yao, Mao, and Luo 2019; Youn and Tagkopoulos 2022) on the knowledge graph to discover novel food-chemical relations.

References

Allot, A.; Chen, Q.; Kim, S.; Vera Alvarez, R.; Comeau, D. C.; Wilbur, W. J.; and Lu, Z. 2019. LitSense: making sense of biomedical literature at sentence level. Nucleic Acids Research, 47(W1): W594-W599.

Ashburner, M.; Ball, C. A.; Blake, J. A.; Botstein, D.; Butler, H.; Cherry, J. M.; Davis, A. P.; Dolinski, K.; Dwight, S. S.; Eppig, J. T.; et al. 2000. Gene ontology: tool for the unification of biology. Nature Genetics, 25(1): 25-29.

Bach, N.; and Badaskar, S. 2007. A review of relation extraction. Literature review for Language and Statistics II, 2: 1-15.

Chen, Z.; Zhang, J.; Wang, P.; Chen, J.; and Li, J. 2022. When Active Learning Meets Implicit Semantic Data Augmentation. In European Conference on Computer Vision, 56-72. Springer.

Coleman, C.; Chou, E.; Katz-Samuels, J.; Culatana, S.; Bailis, P.; Berg, A. C.; Nowak, R.; Sumbaly, R.; Zaharia, M.; and Yalniz, I. Z. 2022. Similarity Search for Efficient Active Learning and Search of Rare Concepts. Proceedings of the AAAI Conference on Artificial Intelligence, 36: 6402-6410.

Davis, A. P.; Wiegers, T. C.; Johnson, R. J.; Sciaky, D.; Wiegers, J.; and Mattingly, C. 2022. Comparative Toxicogenomics Database (CTD): update 2023. Nucleic Acids Research.

Devlin, J.; Chang, M.-W.; Lee, K.; and Toutanova, K. 2018. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.

Jacobs, D. R.; and Tapsell, L. C. 2007. Food, Not Nutrients, Is the Fundamental Unit in Nutrition. Nutrition Reviews, 65: 439-450.

Jiang, H.; Bao, Q.; Cheng, Q.; Yang, D.; Wang, L.; and Xiao, Y. 2020. Complex relation extraction: Challenges and opportunities. arXiv preprint arXiv:2012.04821.

Kanehisa, M.; and Goto, S. 2000. KEGG: Kyoto Encyclopedia of Genes and Genomes. Nucleic Acids Research, 28(1): 27-30.

Kotova, E. E.; and Pisarev, I. A. 2019. Automated Creation of Knowledge Bases for Intelligent Systems, taking into account Linguistic Uncertainty. In 2019 XXII International Conference on Soft Computing and Measurements (SCM), 149-152. IEEE.

LeCun, Y.; Bengio, Y.; and Hinton, G. 2015. Deep learning. Nature, 521(7553): 436-444.

Lee, J.; Yoon, W.; Kim, S.; Kim, D.; Kim, S.; So, C. H.; and Kang, J. 2020. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics, 36(4): 1234-1240.

Longpre, S.; Reisler, J.; Huang, E. G.; Lu, Y.; Frank, A.; Ramesh, N.; and DuBois, C. 2022. Active Learning Over Multiple Domains in Natural Language Tasks. arXiv preprint arXiv:2202.00254.

Paszke, A.; Gross, S.; Massa, F.; Lerer, A.; Bradbury, J.; Chanan, G.; Killeen, T.; Lin, Z.; Gimelshein, N.; Antiga, L.; et al. 2019. PyTorch: An imperative style, high-performance deep learning library. Advances in Neural Information Processing Systems, 32.

Rothwell, J. A.; Perez-Jimenez, J.; Neveu, V.; Medina-Remon, A.; M'hiri, N.; García-Lobato, P.; Manach, C.; Knox, C.; Eisner, R.; Wishart, D. S.; et al. 2013. Phenol-Explorer 3.0: a major update of the Phenol-Explorer database to incorporate data on the effects of food processing on polyphenol content. Database, 2013.

Rotman, G.; and Reichart, R. 2022. Multi-task Active Learning for Pre-trained Transformer-based Models. arXiv preprint arXiv:2208.05379.

Settles, B. 2009. Active learning literature survey. Technical report, University of Wisconsin-Madison Department of Computer Sciences.

Shen, D.; Zhang, J.; Su, J.; Zhou, G.; and Tan, C. L. 2004. Multi-criteria-based active learning for named entity recognition. In Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL-04), 589-596.

Shinbo, Y.; Nakamura, Y.; Altaf-Ul-Amin, M.; Asahi, H.; Kurokawa, K.; Arita, M.; Saito, K.; Ohta, D.; Shibata, D.; and Kanaya, S. 2006. KNApSAcK: a comprehensive species-metabolite relationship database. In Plant Metabolomics, 165-181.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Springer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' TMIC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' The Metabolomics Innovation Centre.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' FooDB Version 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' https://foodb.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='ca.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' USDA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Department of Agriculture, Agricultural Research Service.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' FoodData Central.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' https://fdc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='nal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='usda.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='gov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Wang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Guo, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' and Chen, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2022.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Human-in-the- loop Machine Learning: A Macro-Micro Perspective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' arXiv preprint arXiv:2202.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='10564.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Wang, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Rai, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Pereira, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Eetemadi, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' and Tagkopoulos, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Accelerated knowledge discovery from omics data by optimal experimental design.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Nature Communications, 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Wishart, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Girod, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Peters, H.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Oler, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Jovel, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Budinski, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Milford, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Lui, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Sayeeda, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Mah, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' ChemFOnt: the chemical functional ontology resource.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Nucleic Acids Research.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Wolf, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Debut, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Sanh, V.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Chaumond, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Delangue, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Moi, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Cistac, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Rault, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Louf, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Funtowicz, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Huggingface’s transformers: State-of-the-art natural language processing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' arXiv preprint arXiv:1910.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03771.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Yao, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Mao, C.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' and Luo, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' KG-BERT: BERT for knowledge graph completion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' arXiv preprint arXiv:1909.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='03193.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Youn, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' and Tagkopoulos, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' KGLM: Integrating Knowledge Graph Structure in Language Models for Link Prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' arXiv preprint arXiv:2211.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content='02744.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Young, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' 2003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Trace Element Biology: The Knowledge Base and its Application for the Nutrition of Individuals and Populations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' The Journal of Nutrition, 133: 1581S–1587S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Yu, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Zhu, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/idFIT4oBgHgl3EQfpStZ/content/2301.11322v1.pdf'} +page_content=' Yang, T.' 
Acknowledgments
We would like to thank the members of the Tagkopoulos lab for their suggestions, and Gabriel Simmons for the initial discussions. This work was supported by the USDA-NIFA AI Institute for Next Generation Food Systems (AIFS), USDA-NIFA award number 2020-67021-32855, and the NIEHS grant P42ES004699 to I.T. All computational analyses were performed and the figures were generated by J.Y.
and F.L., and J.Y., F.L., and I.T. contributed to the critical analysis and wrote the paper. I.T. supervised all aspects of the project.
diff --git a/ktAyT4oBgHgl3EQf_fpg/content/2301.00909v1.pdf b/ktAyT4oBgHgl3EQf_fpg/content/2301.00909v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..38bdfb05c6aa67dbda7811e1154f3c2ee33b09ff
--- /dev/null
+++ b/ktAyT4oBgHgl3EQf_fpg/content/2301.00909v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:986b06250b203b734e4534830b64db0d839d954f64f62fd16cc689123b17e38f
+size 586773
diff --git a/lNFQT4oBgHgl3EQfnTbj/content/tmp_files/2301.13369v1.pdf.txt b/lNFQT4oBgHgl3EQfnTbj/content/tmp_files/2301.13369v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..be5e840a9caf6224554805e1abbe75b4fdd6a58f
--- /dev/null
+++ b/lNFQT4oBgHgl3EQfnTbj/content/tmp_files/2301.13369v1.pdf.txt
@@ -0,0 +1,3073 @@
arXiv:2301.13369v1 [math.AP] 31 Jan 2023

Free boundary problem with a nonlocal kernel ∗

Xinfu Chen
Department of Mathematics, University of Pittsburgh, Pittsburgh, PA 15260, USA.

Fang Li †
School of Mathematics, Sun Yat-sen University, No. 135, Xingang Xi Road, Guangzhou 510275, P. R. China.

Maolin Zhou
Chern Institute of Mathematics and LPMC, Nankai University, Tianjin 300071, P. R. China.

Abstract

In this paper, we propose a new nonlocal model for the two-phase Stefan problem, in which the nonlocal version of the one-phase Stefan problem arises naturally as a special case. Among other things, we obtain the optimal condition for the pointwise convergence between the local and nonlocal one-phase Stefan problems and an equivalent characterization of this optimal condition.
Moreover, we provide some sufficient criteria for the continuous expansion of the free boundary, and, when these sufficient conditions are violated, we construct examples demonstrating that jumping phenomena can occur on the free boundary. The jumping phenomenon is essentially induced by the nonlocal diffusion and therefore does not appear in the classical Stefan problem.

Keywords: nonlocal Stefan problem, free boundary, jumping phenomena
MSC (2020): 35K57, 45K05, 35R35

∗ M. Zhou was partially supported by the National Key Research and Development Program of China (2021YFA1002400, 2020YFA0713300), the Nankai Zhide Foundation, and the NSF of China (Nos. 12271437, 11971498). F. Li was supported by the NSF of China (No. 11971498).
† Corresponding author. E-mail: lifang55@mail.sysu.edu.cn

1 Introduction

The classical Stefan problem is well known to describe the evolution of the interface between two phases of a substance undergoing a phase change, for example the melting of a solid such as ice into water. Latent heat, defined as the heat or energy that is absorbed or released during a phase change of a substance, acts as an energy source or sink at a moving solid-liquid interface, and the resulting boundary condition is known as the Stefan boundary condition.

In this paper, we propose and study the following nonlocal version of the two-phase Stefan problem:

    γt(t, x) = a ∫_{γ>0} k(x − y) γ(t, y) dy − a γ(t, x) χ_{γ>0}
               + b ∫_{γ<−ℓ0} η(x − y) (γ(t, y) + ℓ0) dy − b (γ(t, x) + ℓ0) χ_{γ<−ℓ0},   t > 0, x ∈ Rn,
    γ(0, x) = γ0(x),                                                                     x ∈ Rn,      (1.1)

where χ_E denotes the characteristic function of E and the kernel functions k, η satisfy

    (K)  k ∈ C(Rn) ∩ L∞(Rn),  k ≥ 0,  k(0) > 0,  ∫_{Rn} k(x) dx = 1.

For clarity, we always assume that

    Ω0 is a smooth and bounded domain in Rn and ℓ0 is a positive constant.      (1.2)

For the initial data, we assume that

    γ0 ∈ L∞(Rn),  γ0(x) = −α0 for x ∈ Rn \ Ω̄0,  α0 ∈ (0, ℓ0).      (1.3)

Also denote

    γ+(t, x) = γ(t, x) χ_{γ>0},  (γ(t, x) + ℓ0)− = (γ(t, x) + ℓ0) χ_{γ<−ℓ0},

and

    Ω(t) = {x ∈ Rn | γ(t, x) ≥ 0},  Ω−(t) = {x ∈ Rn | γ(t, x) ≤ −ℓ0}.      (1.4)

These notations will be used whenever they are more convenient.

To better explain the formulation of the model, we first consider the classical one-phase Stefan problem, which typically describes the melting of a body of ice, maintained at zero degrees centigrade, in contact with a region of water initially occupying Ω0. Based on latent heat and conservation of energy, the model is formulated as follows:

    θt(t, x) = d Δθ(t, x),      t > 0, x ∈ {θ(t, ·) > 0},
    ∇xθ · ∇xs = −ℓ0,            t > 0, x ∈ ∂{θ(t, ·) > 0},
    θ = 0,                      t > 0, x ∈ ∂{θ(t, ·) > 0},
    θ(0, x) = u0(x),            x ∈ Ω̄0,      (1.5)

where θ = θ(t, x) denotes the water temperature and the free boundary ∂{θ(t, ·) > 0} at time t is given by the equation s(x) = t. Also set s(x) = 0 if x ∈ Ω̄0. There are many famous papers on the regularity of the free boundary, such as [6, 7, 10].

On the basis of latent heat, the nonlocal version of the one-phase Stefan problem is proposed as follows:

    γt(t, x) = d ∫_{γ>0} k(x − y) γ(t, y) dy − d γ(t, x) χ_{γ>0},   t > 0, x ∈ Rn,
    γ(0, x) = γ0(x),                                                x ∈ Rn,      (1.6)

where the kernel function k satisfies (K), and for the initial data we assume that

    γ0 ∈ L∞(Rn),  γ0(x) = −ℓ0 for x ∈ Rn \ Ω̄0,  γ0|_{Ω̄0} ≥ 0,  γ0|_{Ω̄0} ≢ 0.      (1.7)
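Since the right-hand side of (1.6) involves no spatial derivatives, the problem can be simulated directly as a coupled system of ODEs indexed by x, and the set Ω(t) = {γ(t, ·) ≥ 0} is simply read off from the computed solution. The following minimal one-dimensional sketch is our illustration and not part of the paper; the Gaussian kernel, the grid, the time step and the initial data are illustrative assumptions.

import numpy as np

# Forward-Euler sketch of the nonlocal one-phase Stefan problem (1.6):
#   gamma_t = d * ( (k * gamma^+)(x) - gamma^+(x) ),  gamma^+ = gamma * chi_{gamma > 0}.
# All numerical parameters below are illustrative choices.

d = 1.0                 # diffusion rate
ell0 = 1.0              # latent-heat constant l_0
L, N = 20.0, 2001       # spatial window [-L, L] and number of grid points
x = np.linspace(-L, L, N)
h = x[1] - x[0]
dt, steps = 0.01, 2000

# kernel satisfying (K): nonnegative, k(0) > 0, unit (discrete) mass
k = np.exp(-x**2 / 2.0)
k /= k.sum() * h

# initial data as in (1.7): nonnegative on the initial region, -l_0 outside
gamma = np.where(np.abs(x) <= 1.0, 2.0, -ell0)

for _ in range(steps):
    gplus = np.maximum(gamma, 0.0)                 # gamma^+ = gamma * chi_{gamma > 0}
    conv = h * np.convolve(k, gplus, mode="same")  # (k * gamma^+)(x)
    gamma += dt * d * (conv - gplus)               # explicit Euler step of (1.6)

omega = x[gamma >= 0.0]                            # Omega(t) = {gamma >= 0}
print("extent of {gamma >= 0} at t = %.0f: [%.3f, %.3f]" % (steps * dt, omega.min(), omega.max()))

In this discretization a grid point with γ ≤ 0 can only gain energy through the convolution term, so the computed set {γ ≥ 0} never shrinks, which is consistent with the expansion property stated in Theorem 1.5(i) below.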
The essence of the nonlocal Stefan problem (1.6) is that, at time t,

• if x ∈ {x ∈ Rn | γ(t, x) ≤ 0}, then x can only absorb energy from outside;
• if x ∈ {x ∈ Rn | γ(t, x) > 0}, then x can not only absorb energy from outside, but also transfer its energy outside.

Here the value ℓ0 plays the role of the latent heat: γ equal to −ℓ0 corresponds to ice at zero degrees centigrade, and γ reaching zero means that sufficient energy has already accumulated at that point for the phase change.

The nonlocal version of the two-phase Stefan problem (1.1) is proposed in the same spirit. The phase change happens when γ reaches either zero or −ℓ0, and the initial data γ = −α0 in Rn \ Ω̄0, where α0 ∈ (0, ℓ0), corresponds to a mixture of water and ice at zero degrees centigrade. Different from the one-phase case, the initial data γ0|_{Ω̄0} may change sign; in particular, when both {x ∈ Rn | γ(t, x) < −ℓ0} and {x ∈ Rn | γ(t, x) > 0} are nonempty, energy can be absorbed and released simultaneously in the set {x ∈ Rn | −ℓ0 ≤ γ(t, x) ≤ 0}.

We point out that a nonlocal version of the one-phase Stefan problem was also proposed and studied in [5]; comparisons with [5] are made whenever the results obtained in this paper are related to those derived there. Moreover, the fractional two-phase Stefan problem was treated in [2] and, more generally, the two-phase Stefan problem with anomalous diffusion was investigated in [3].

The main purpose of this paper is to study the effects of nonlocal diffusion operators on the evolution of free boundaries and to explore connections and discrepancies between the local and nonlocal Stefan problems.

First, we establish local and global existence for the nonlocal Stefan problems.

Theorem 1.1. Assume that in the problem (1.1) the kernel functions satisfy the assumption (K), the condition (1.2) is valid and the initial data satisfies (1.3). Then the problem (1.1) admits a unique classical solution γ(t, ·) ∈ L∞(Rn) defined for all t > 0, and γ satisfies the estimate

    ess inf_{Rn} γ0 ≤ γ(t, x) ≤ ess sup_{Rn} γ0   for t > 0, x ∈ Rn.      (1.8)

Moreover, if γ0|_{Ω̄0} ∈ C(Ω̄0), then γ(t, ·) is continuous in Ω̄0 and in Rn \ Ω̄0 for any t > 0.

Next, we investigate the convergence relations between the local and nonlocal Stefan problems. For simplicity, for ǫ > 0, denote

    kǫ(x) = (1/ǫn) k(x/ǫ),  ηǫ(x) = (1/ǫn) η(x/ǫ).

Before presenting the main results, we briefly explain what the natural and optimal assumptions on the nonlocal kernels should be when studying convergence relations between models with local and nonlocal diffusion. Define the Fourier transform of the kernel function k by

    k̂(ξ) = ∫_{Rn} e−ix·ξ k(x) dx.

Based on the properties of the Fourier transform, one observes that for φ ∈ L1(Rn) ∩ C2(Rn),

    ∫_{Rn} e−ix·ξ [ (1/ǫ2) ∫_{Rn} kǫ(x − y) φ(y) dy − (1/ǫ2) φ(x) ] dx = (1/ǫ2) ( k̂(ǫξ) − 1 ) φ̂(ξ),
    ∫_{Rn} e−ix·ξ Δφ(x) dx = −|ξ|2 φ̂(ξ),

and, for fixed ξ,

    lim_{ǫ→0} (1/ǫ2) ( k̂(ǫξ) − 1 ) φ̂(ξ) = −A |ξ|2 φ̂(ξ)

under the condition

    k̂(ξ) = 1 − A |ξ|2 + o(|ξ|2)   as ξ → 0,      (1.9)

where A > 0 is a constant. This observation indicates that the condition (1.9) is optimal for the nonlocal approximation of the Laplacian; indeed, the nonlocal approximation of the heat equation is verified under this condition, see [1] for details.

We establish an important equivalent characterization of the condition (1.9) in Proposition 1.2 below.
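Proposition 1.2 says that (1.9) holds exactly when k has vanishing first moments, uncorrelated components, and finite, isotropic second moments, with A = (1/2n) ∫_{Rn} |x|2 k(x) dx. As a quick sanity check (our illustration, not part of the paper), the following sketch verifies this numerically for the one-dimensional standard Gaussian kernel, for which the exact value is A = 1/2.

import numpy as np

# Numerical check of condition (1.9) for k(x) = exp(-x^2/2)/sqrt(2*pi) in dimension n = 1.
# We compute A = (1/(2n)) * integral |x|^2 k(x) dx by a Riemann sum and compare it with
# (1 - k_hat(xi)) / xi^2 for small xi; by symmetry k_hat(xi) is real.

x = np.linspace(-30.0, 30.0, 400001)
dx = x[1] - x[0]
k = np.exp(-x**2 / 2.0) / np.sqrt(2.0 * np.pi)

A = 0.5 * np.sum(x**2 * k) * dx              # second-moment formula for A (n = 1)
print("A from the second moment:", A)        # exact value: 0.5

for xi in (1.0, 0.1, 0.01):
    k_hat = np.sum(np.cos(xi * x) * k) * dx  # k_hat(xi) = integral e^{-i xi x} k(x) dx
    print("xi = %5.2f   (1 - k_hat)/xi^2 = %.6f" % (xi, (1.0 - k_hat) / xi**2))

The printed ratios approach 0.5 as ξ decreases, matching the moment formula for A.

Proposition 1.2.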
Assume that k satisfies the assumption (K). Then the following two state- +ments are equivalent. +(i) For 1 ≤ j, h ≤ n, j ̸= h, +� +Rn xjk(x)dx = 0, +� +Rn xjxhk(x)dx = 0, +� +Rn x2 +jk(x)dx = +1 +n +� +Rn |x|2k(x)dx < +∞. +(ii) The Fourier transform of k satisfies the assumption (1.9). +Moreover, 1 +2n +� +Rn |x|2k(x)dx = A. +In order not to interrupt the main theme of this paper, we leave the proof of this proposition +in the appendix. + +5 +We first establish the convergence result about the two-phase Stefan problem. Let γǫ be the +solution of the following problem + + + + + + + + + + + +(γǫ)t(t, x) = a +ǫ2 +� +{γǫ>0} +kǫ(x − y)γǫ(t, y)dy − a +ǫ2γǫ(t, x)χ{γǫ>0} ++ b +ǫ2 +� +{γǫ<−ℓ0} +ηǫ(x − y)(γǫ(t, y) + ℓ0)dy − b +ǫ2(γǫ(t, x) + ℓ0)χ{γǫ<−ℓ0} +t > 0, x ∈ Rn, +γǫ(0, x) = γ0(x) +x ∈ Rn. +(1.10) +Theorem 1.3. In the problem (1.10), assume that the conditions of Theorem 1.1 are valid. In +addition, assume that the kernel functions satisfy Proposition 1.2(i) and +� +Rn |x|3k(x)dx < +∞. +(1.11) +Then for any given T > 0, 0 < t < T,γǫ(t, ·) converges to γ(t, ·) in L1 +loc(Rn) as ǫ → 0+, where +γ ∈ L∞((0, T) × Rn) is the generalized solution of +� +∆u ∈ β(u)t, +β(u)(0, x) = γ0(x), +(1.12) +where A = a +2 +� +Rn |z|2k(z)dz, B = b +2 +� +Rn |z|2η(z)dz, +u = + + + + + +Aγ +for γ > 0 +0 +for − ℓ0 ≤ γ ≤ 0 +B(γ + ℓ0) +for γ < −l0 +and β(u) is a multivalued mapping defined as follows +β(u) = + + + + + + + + + +1 +B u − ℓ0 +for u < 0 +[−ℓ0, 0] +for u = 0 +1 +Au +for u > 0. +Thanks to Proposition 1.2, one sees that in Theorem 1.3, only the condition (1.11) is extra +in the studies of convergence relations. +Obviously, the kernel functions which are radially +symmetric and compactly supported satisfy the extra condition (1.11). +Next, the convergence relations between local and nonlocal one-phase Stefan problems are +verified under the optimal condition (1.9). Similar to (1.10), we rescale the problem (1.6) as +follows + + + +γǫt(t, x) = 1 +ǫ2 +� +Rn kǫ(x − y)γ+ +ǫ (t, y)dy − 1 +ǫ2γ+ +ǫ (t, x) +t > 0, x ∈ Rn, +γǫ(0, x) = γ0(x) +x ∈ Rn, +(1.13) +where for simplicity, set d = 1 and denote +γ+ +ǫ (t, x) = γǫ(t, x)χ{γǫ(t,x)>0}. + +6 +Theorem 1.4. In the problem (1.13), assume that the kernel function satisfies the assumption +(K), the condition (1.2) is valid and the initial data satisfies (1.7). Also, assume that the +Fourier transform of k satisfies (1.9). Then for any given T > 0, γ+ +ǫ converges to the solution +θ of the one-phase Stefan problem (1.5) in the following sense: +� t +0 +γ+ +ǫ (τ, x)dτ → +� t +min{s(x),t} +θ(τ, x)dτ +a.e. in (0, T) × Rn, +where we set d = A in the problem (1.5). +The convergence relations between local and nonlocal one-phase Stefan problems is also +studied in [5] under the additional conditions that the kernel function is radially symmetric +and compactly supported. +From now on, we mainly focus on the nonlocal one-phase Stefan problem and derive some +interesting and fundamental properties related to expansion, boundedness and continuity of free +boundaries in the nonlocal one-phase Stefan problem (1.6). Due to the lack of regularity in the +nonlocal Stefan problems, we will impose an extra condition that γ0|¯Ω0 ∈ C(¯Ω0) on the initial +data γ0 when discussing the properties of free boundaries. +Theorem 1.5. In the problem (1.6), assume that the kernel function satisfies the assumption +(K), the condition (1.2) is valid, and the initial data γ0 satisfies (1.7) and the extra condition +that γ0|¯Ω0 ∈ C(¯Ω0). We have the following statements. 
+(i) Expansion: there exists t0 > 0 such that Ω(t) = Ω(0) for 0 ≤ t ≤ t0 and Ω(t1) ⊆ Ω(t2) for +0 < t1 < t2. +(ii) Boundedness: there exists R > 0, which depends on the initial data only, such that Ω(t) ⊆ +BR(0) for all t > 0. +Theorem 1.5(i) is also proved in [5], where the kernel function is assumed to be compactly +supported and radially symmetric. For the nonlocal two-phase Stefan problem (1.1), due to the +interaction between Ω(t) and Ω−(t) denoted in (1.4), Theorem 1.5(i) might not hold. However, +thanks to the comparison principle, Theorem 1.5(ii) remains true for both Ω(t) and Ω−(t). +We further investigate the continuity of the free boundary in the nonlocal one-phase Stefan +problem. For convenience, we prepare an extra assumption about the kernel function as follows +(K1) k(x) is radially symmetric, decreasing in |x|. +Theorem 1.6. Under the conditions of Theorem 1.5, if additionally assume that ¯Ω0 is convex +and the assumption (K1) is valid, then Ω(t) expands continuously. +In Theorem 1.6, extra conditions on the kernel function k(x) and the initial domain Ω0 are +needed to guarantee the continuous expansion of the free boundary ∂Ω(t). A natural question is +what happens without these extra conditions. Two examples are constructed to show that when +either the extra condition on the kernel function or that on the initial domain Ω0 in Theorem +1.6 is violated, the population range could generate at a distant place. This is so called jumping + +7 +phenomena. Since the nonlocal dispersal describes the movement between non-adjacent spatial +locations, the jumping phenomena is natural. It also reflects the essential differences between +local and nonlocal dispersal operators. +We also point out that, if allowing the initial data to be nonconstant outside ¯Ω0, similar to +[5, Theorem 4.6], where the kernel function is assumed to be compactly supported and radially +symmetric, jumping phenomena could happen by choosing initial data properly. Indeed, the +conclusion is valid as long as the kernel function satisfies the assumption (K). We omit the +proof since it is similar. +At the end, the main features of our paper are summarized as follows. +• Formulation of a new nonlocal model for two-phase Stefan problem, where the nonlocal +version of the one-phase Stefan problem arises naturally as a special case. +• The optimal condition (1.9) for the pointwise convergence between local and nonlocal +one-phase Stefan problem in Theorem 1.4. +• An equivalent characterization between the conditions (i) about the kernel function and +(ii), i.e., (1.9), about the Fourier transform of the kernel function in Proposition 1.2. +• For local and global existence in Theorem 1.1, expansion and boundedness of free bound- +aries in Theorem 1.5, we only require the basic assumption (K) on the kernel functions. +• The sufficient conditions derived in Theorem 1.6 for the continuous expansion of the free +boundary when the initial data outside initial domain Ω0 is assumed to be a negative +constant. Counterexamples are constructed to demonstrate that the jumping phenomena +could happen when the sufficient conditions are violated. +This paper is organized as follows. Theorem 1.1 and some preliminary results for the problem +(1.1) are established in Section 2. In Section 3, we focus on the convergence relations between +local and nonlocal Stefan problems and present the proofs of Theorem 1.3 and Theorem 1.4. 
In +Section 4, Theorems 1.5 and 1.6 related to properties about the free boundary of the nonlocal +Stefan problem are verified. Moreover, we construct two examples where jumping phenomena +happen, when one of the additional assumptions in Theorem 1.6 is violated. At the end, the +proof of Proposition 1.2 is included in the appendix. +2 +Wellposedness and preliminaries +2.1 +Local and global existence +We first verify the local and global existence to the nonlocal version of the two-phase Stefan +problem (1.1). The same arguments can be applied to the the nonlocal version of the one-phase +Stefan problem (1.6) word by word. + +8 +Proof of Theorem 1.1. Denote M0 = ∥γ0∥L∞(Rn), Y = L∞(Rn), for s > 0, +Xs = +� +φ ∈ C([0, s), Y) +��φ(0, ·) = γ0(·), ∥φ(t, ·)∥L∞(Rn) ≤ 2M0, t ∈ [0, s) +� +, +and +∥φ∥C([0,s),Y) = sup +0≤t0} +k(x − y)φ(τ, y)dydτ − a +� t +0 +φ(τ, x)χ{φ>0}dτ ++ b +� t +0 +� +{φ<−ℓ0} +η(x − y)(φ(τ, y) + ℓ0)dydτ − b +� t +0 +(φ(τ, x) + ℓ0)χ{φ<−ℓ0}dτ. +Then it is routine to show that T φ ∈ C([0, s), Y), T φ(0, ·) = γ0(·) and +∥T φ∥C([0,s),L∞(Rn)) ≤ M0 + 2as∥φ∥C([0,s),Y) + 2bs∥φ∥C([0,s),Y) ≤ M0 + 4s (a + b) M0. +Moreover, for φ1, φ2 ∈ Xs, +∥T φ1 − T φ2∥C([0,s),Y) ≤ 2as∥φ1 − φ2∥C([0,s),Y) + 2bs∥φ1 − φ2∥C([0,s),Y). +Thus it is obvious that there exists t0 > 0, which depends a, b and M0 only and is sufficiently +small, such that for 0 < s ≤ t0, T maps Xs into Xs and T is a contraction mapping in Xs. +Hence by the contraction mapping theorem, for 0 < s ≤ t0, there exists a unique γ ∈ Xs +satisfying +γ(t, x) = γ0(x) + a +� t +0 +� +{γ>0} +k(x − y)γ(τ, y)dydτ − a +� t +0 +γ(τ, x)χ{γ>0}dτ ++ b +� t +0 +� +{γ<−ℓ0} +η(x − y)(γ(τ, y) + ℓ0)dydτ − b +� t +0 +(γ(τ, x) + ℓ0)χ{γ<−ℓ0}dτ +for 0 < t < s, x ∈ Rn. Thus, obviously γ is the unique solution to the problem (1.1). +Let (0, Tmax) denote the maximal time interval for which the solution γ(t, x) of the problem +(1.1) exists. +It remains to show Tmax = +∞. +For this purpose, it suffices to show that +∥γ(t, ·)∥L∞(Rn) is bounded in (0, Tmax). +To be more specific, we claim that γ satisfies the +estimate (1.8) in (0, Tmax). +Fix any 0 < T < Tmax. First, assume that the kernel functions k and η are compactly +supported. Then since ¯Ω0 is bounded, it is standard to show that {γ(t, x) ≥ 0} and {γ(t, x) ≤ +−ℓ0} remain bounded for 0 < t < T. +Notice that if |{γ0(x) > 0}| = 0, then by the equation satisfied by γ(t, x), one has +γ(t, x) ≤ ess sup +Rn γ0, +0 < t < T, x ∈ Rn. + +9 +Now we consider the case that |{γ0(x) > 0}| > 0. +Based on the problem (1.1), for any +1 < p < +∞, 0 < t < T, one has +(γ+)p−1γt(t, x) ≤ (γ+)p−1 +� +a +� +{γ>0} +k(x − y)γ(t, y)dy − aγ(t, x)χ{γ>0} +� +. +Then direct computation yields that for 0 < t < T, +1 +p +d +dt +� +Rn(γ+(t, x))pdx ≤ a +� +Rn(γ+(t, x))p−1 +�� +Rn k(x − y)γ+(t, y)dy − γ+(t, x) +� +dx +≤ +a +� +Rn(γ+(t, x))p−1 +�� +Rn k(x, y)dy +� p−1 +p �� +Rn k(x, y)(γ+(t, y))pdy +� 1 +p +dx − a∥γ+(t, ·)∥p +Lp(Rn) +≤ +a∥γ+(t, ·)∥p−1 +Lp(Rn) +�� +Rn +� +Rn k(x, y)(γ+(t, y))pdydx +� 1 +p +− a∥γ+(t, ·)∥p +Lp(Rn) ≤ 0. +Hence for any 1 < p < +∞, 0 < t < T, +∥γ+(t, ·)∥Lp(Rn) ≤ ∥γ+(0, ·)∥Lp(Rn), +and it follows that +γ(t, x) ≤ ess sup +Rn γ0, +0 < t < T, x ∈ Rn. +Similar arguments can be applied on (γ(t, x) + ℓ0)− to derive that +γ(t, x) ≥ ess inf +Rn γ0, +0 < t < T, x ∈ Rn. +The claim is proved for compactly supported kernel functions since T ∈ (0, Tmax) is arbitrary. +Now consider the case that the kernel functions k and η are not compactly supported. 
+Then there exists a series of kernels kj, ηj, j ≥ 1, which are compactly supported, satisfy the +assumption (K), and +lim +j→∞ ∥kj − k∥L1(Rn) = 0, lim +j→∞ ∥ηj − η∥L1(Rn) = 0. +(2.1) +Let γj denote the solution to the problem (1.1) with k replaced by kj and η replaced by ηj. Set +wj = γ − γj, j ≥ 1. Then wj satisfies + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(wj)t(t, x) = a +� +{γ>0} +k(x − y)γ(t, y)dy − aγ(t, x)χ{γ>0} +−a +� +{γj>0} +kj(x − y)γj(t, y)dy + aγj(t, x)χ{γj>0} ++b +� +{γ<−ℓ0} +η(x − y)(γ(t, y) + ℓ0)dy − b(γ(t, x) + ℓ0)χ{γ<−ℓ0} +−b +� +{γj<−ℓ0} +ηj(x − y)(γj(t, y) + ℓ0)dy + b(γj(t, x) + l0)χ{γj<−ℓ0} +0 < t < Tmax, x ∈ Rn, +wj(0, x) = 0 +x ∈ Rn. + +10 +Then for wj > 0, direct computation yields that +(wj)t(t, x) +≤ +a +� +{γ>0} +k(x − y) (γ(t, y) − γj(t, y)) dy + a +� +{γ>0} +k(x − y)γj(t, y)dy +−a +� +{γj>0} +(kj(x − y) − k(x − y)) γj(t, y)dy − a +� +{γj>0} +k(x − y)γj(t, y)dy ++b +� +{γ<−ℓ0} +η(x − y)(γ(t, y) + ℓ0)dy − b +� +{γj<−ℓ0} +η(x − y)(γ(t, y) + ℓ0)dy ++b +� +{γj<−ℓ0} +η(x − y)(γ(t, y) − γj(t, y))dy ++b +� +{γj<−ℓ0} +(η(x − y) − ηj(x − y))(γj(t, y) + ℓ0)dy +≤ +(a + b)∥wj∥L∞(Rn) + aM0∥kj − k∥L1(Rn) + bM0∥ηj − η∥L1(Rn), +where the last inequality follows from the fact that γj satisfies the estimate (1.8). Similarly for +wj < 0, we have +(−wj)t(t, x) ≤ (a + b)∥wj∥L∞(Rn) + aM0∥kj − k∥L1(Rn) + bM0∥ηj − η∥L1(Rn). +The above two inequalities indicate that for 0 < t < Tmax, +|wj(t, x)| = lim +δ→0 +� t +0 +∂ +∂τ +� +w2 +j(τ, x) + δ2� 1 +2 dτ += +lim +δ→0 +� t +0 +wj(τ, x) +� +w2 +j(τ, x) + δ2� 1 +2 +∂ +∂τ wj(τ, x)dτ +≤ +(a + b) +� t +0 +∥wj∥L∞(Rn)(τ)dτ + aM0∥kj − k∥L1(Rn)t + bM0∥ηj − η∥L1(Rn)t. +(2.2) +Denote +hj(t) = +� t +0 +∥wj∥L∞(Rn)(τ)dτ, +then (2.2) implies that for 0 < t < Tmax, +h′ +j(t) ≤ (a + b)hj(t) + aM0∥kj − k∥L1(Rn)t + bM0∥ηj − η∥L1(Rn)t. +Direct computation yields that for 0 < t < Tmax, +hj(t) ≤ +M0 +(a + b)2e(a+b)t � +a∥kj − k∥L1(Rn) + b∥ηj − η∥L1(Rn) +� +, +which, together with (2.2), yields that for 0 < t < Tmax, +∥wj∥L∞(Rn)(t) ≤ M0 +� +1 +a + be(a+b)t + t +� � +a∥kj − k∥L1(Rn) + b∥ηj − η∥L1(Rn) +� +. +(2.3) +This, together with (2.1) and the fact that γj satisfies the estimate (1.8) for all j ≥ 1, implies +the desired claim for general kernel functions under the assumption (K). +At the end, it is routine to verify that if γ0|¯Ω0 ∈ C(¯Ω0), then γ(t, ·) is continuous in ¯Ω0 and +Rn \ ¯Ω0 for any t > 0. + +11 +2.2 +Preliminaries +We first present the comparison principle for the nonlocal version of the two-phase Stefan +problem (1.1) and omit the proof since it is standard. Similarly, the comparison principle is +also valid for the nonlocal version of the one-phase Stefan problem (1.6). +Proposition 2.1. Assume that the conditions of Theorem 1.1 are valid. Also assume that +γ∗ +0 ∈ L∞(Rn), γ∗ +0(x) = −α∗ +0 for x ∈ Rn \ ¯Ω0, α∗ +0 ∈ (0, ℓ0). Let γ∗ denote the solution to the +problem (1.1) with initial data γ∗ +0. If γ∗ +0 ≥ γ0, then γ∗ ≥ γ for all t > 0. +Moreover, we present a type of strong maximum principle for the nonlocal version of one- +phase Stefan problem (1.6). +Proposition 2.2. Under the conditions of Theorem 1.5, given s ≥ 0, we have γ(t, x) > 0 in +Ω(s) for t ≥ s. +Proof. First, we claim that if x ∈ {x ∈ Ω(s) | γ(s, x) > 0}, then for t > s, γ(t, x) > 0. Due to +the continuity of the solution in t, we only need consider the case that s > 0. According to +γt(t, x) = d +� +{γ>0} +k(x − y)γ(t, y)dy − dγ(t, x)χ{γ>0} ≥ −dγ(t, x)χ{γ>0}, +the claim follows immediately. +Next we consider the initial domain ¯Ω0. 
Set +γ0δ(x) = +� +γ0(x) + δ +x ∈ ¯Ω, +γ0(x) +x ∈ Rn \ ¯Ω0, +where δ > 0, and let γδ denote the solution to the problem (1.6) with the initial data (1.7), +where γ0 is replaced by γ0δ. Thanks to the above claim, one sees that γδ(t, x) > 0 for t > 0, +x ∈ ¯Ω0. By letting δ → 0+, it is routine to derive that γ(t, x) ≥ 0 for t > 0, x ∈ ¯Ω0, i.e., +¯Ω0 ⊆ Ω(t) for t > 0. +Moreover, since γ0|¯Ω0 ≥ 0, γ0|¯Ω0 ̸≡ 0, the claim at the beginning indicates that {x ∈ +¯Ω0 | γ(t, x) > 0} is not empty for t > 0. Suppose that there exists t0 > 0 such that γ(t0, x) +touches zero somewhere in ¯Ω0. By choosing +x0 ∈ ∂{x ∈ ¯Ω0 | γ(t0, x) > 0} +� +{x ∈ ¯Ω0 | γ(t0, x) = 0}, +we have +0 ≥ γt(t0, x0) = d +� +{γ(t0,y)>0} +k(x0 − y)γ(t0, y)dy > 0, +where the strict inequality is due to the assumption (K) and the choice of x0. +This is a +contradiction and thus γ(t, x) > 0 for t > 0, x ∈ ¯Ω0. +It remains to consider the set {x ∈ Ω(s) \ ¯Ω0 | γ(s, x) = 0}, when it is not empty. Fix +x∗ ∈ {x ∈ Ω(s) \ ¯Ω0 | γ(s, x) = 0} and let s1 denote the moment when γ(t, x∗) first touches +zero. Obviously s1 ≤ s and by the equation satisfied by γ, we have +ℓ0 = +� s1 +0 +d +� +{γ(t,y)>0} +k(x∗ − y)γ(t, y)dydt. + +12 +Then obviously there exists t1 ∈ (0, s1) such that +� +{γ(t1,y)>0} +k(x∗ − y)γ(t1, y)dy > 0. +(2.4) +We claim that for any t > t1, +� +{γ(t,y)>0} +k(x∗ − y)γ(t, y)dy > 0. Suppose that the claim is not +true, i.e., there exists t2 > t1 such that +� +{γ(t2,y)>0} +k(x∗ − y)γ(t2, y)dy = 0. +This implies that γ(t2, y) ≤ 0 in the set {y ∈ Rn | k(x∗ − y) > 0}. Again thanks to the claim +at the beginning, we have γ(t1, y) ≤ 0 in the set {y ∈ Rn | k(x∗ − y) > 0}, which contradicts to +(2.4). The claim is proved. +According to this claim and the choice of s, x∗, one sees that +γt(s, x∗) = d +� +{γ(s,y)>0} +k(x∗ − y)γ(s, y)dy > 0. +Hence for t > s ≥ 0, γ(t, x∗) > 0. +At the end, some priori estimates are verified for the nonlocal version of the two-phase +Stefan problem. +Lemma 2.3. Under the assumptions of Theorem 1.1, there exists a constant C1 > 0, which +depends on the initial data only, such that for given 1 ≤ p ≤ ∞, we have +∥γ+(t, ·)∥Lp(Rn) ≤ C1, ∥(γ(t, ·) + ℓ0)−∥Lp(Rn) ≤ C1, +t > 0. +Proof. Notice that if φ ∈ L1(Rn) � L∞(Rn), then for any p > 1, φ ∈ Lp(Rn) and +∥φ∥Lp(Rn) ≤ +� +∥φ∥p−1 +L∞(Rn)∥φ∥L1(Rn) +� 1 +p ≤ +� +∥φ∥L∞(Rn) + 1 +� � +∥φ∥L1(Rn) + 1 +� +. +Hence it suffices to verify the statements for p = 1 and p = ∞. +Indeed, when p = ∞, the conclusion is obvious due to Theorem 1.1, i.e., +∥γ(t, ·)∥L∞(Rn) ≤ ∥γ0∥L∞(Rn). +(2.5) +In order to estimate ∥γ+(t, ·)∥L1(Rn), we first consider the case that both k and η are com- +pactly supported. Let ˆγ(t, x) denote the solution to the problem (1.1) with the initial data +replaced by +ˆγ(0, x) = ∥γ0|¯Ω0∥L∞(Ω0), x ∈ ¯Ω0, +ˆγ(0, x) = −α0, x ∈ Rn \ ¯Ω0. +By Theorem 1.1 and Proposition 2.1, we have +ˆγ(t, x) ≥ γ(t, x), −α0 ≤ ˆγ(t, x) ≤ ∥γ0|¯Ω0∥L∞(Ω0), +t > 0, x ∈ Rn. +(2.6) + +13 +Since ¯Ω0 is bounded and k, η are compactly supported, for t > 0, it is routine to show that +{ˆγ(t, x) ≥ 0} remains bounded. Set +Σ+(t) = +� +0<τ 0 +∥γ+(t, ·)∥L1(Rn) ≤ ∥γ0|¯Ω0∥L∞(Ω0) +� +1 + ∥γ0|¯Ω0∥L∞(Ω0) +α0 +� +|¯Ω0|. +(2.7) +Now consider the case that the kernel functions k and η satisfy the assumption (K), but +are not compactly supported. Then there exists a series of kernel functions kj, ηj, j ≥ 1, which +are compactly supported, satisfy the assumption (K), and +lim +j→∞ ∥kj − kǫ∥L1(Rn) = 0, lim +j→∞ ∥ηj − ηǫ∥L1(Rn) = 0. 
+Let γj denotes the solution to the problem (1.1) with k replaced by kj and η replaced by ηj . +Similar to the proof of Theorem 1.1, we have +lim +j→∞ ∥γ+ +j − γ+∥L∞(Rn) ≤ lim +j→∞ ∥γj − γ∥L∞(Rn) = 0. +This, together with (2.7), implies that for any R > 0, +� +BR(0) +γ+(t, x)dx = lim +j→∞ +� +BR(0) +γ+ +j (t, x)dx ≤ ∥γ0|¯Ω0∥L∞(Ω0) +� +1 + ∥γ0|¯Ω0∥L∞(Ω0) +α0 +� +|¯Ω0|. +Since R is arbitrary, for any given t > 0, +∥γ+(t, ·)∥L1(Rn) ≤ ∥γ0|¯Ω0∥L∞(Ω0) +� +1 + ∥γ0|¯Ω0∥L∞(Ω0) +α0 +� +|¯Ω0|. +Obviously, ∥(γ(t, ·) + ℓ0)−∥L1(Rn) can be estimated in a similar way. The proof is complete. + +14 +Lemma 2.4. Under the assumptions of Theorem 1.1, we have +� +Rn |γ(t, x + h) − γ(t, x)| dx ≤ +� +Rn |γ0(x + h) − γ0(x)| dx, +t > 0, x ∈ Rn. +Proof. First of all, fix x, h ∈ Rn. For δ ̸= 0, introduce µδ(X) = +� +X2 + δ2� 1 +2 . +According to the problem (1.1) satisfied by γ, it is routine to verify that +∂ +∂tµδ(γ(t, x + h) − γ(t, x)) += +γ(t, x + h) − γ(t, x) +� +(γ(t, x + h) − γ(t, x))2 + δ2� 1 +2 (γ(t, x + h) − γ(t, x))t +≤ +|γ(t, x + h) − γ(t, x)| +� +(γ(t, x + h) − γ(t, x))2 + δ2� 1 +2 +× +� +a +� +Rn k(x − y)|γ+(t, y + h) − γ+(t, y)|dy − a|γ+(t, x + h) − γ+(t, x)| +� ++ +|γ(t, x + h) − γ(t, x)| +� +(γ(t, x + h) − γ(t, x))2 + δ2� 1 +2 · b +� +Rn k(x − y)|(γ + ℓ0)−(t, y + h) − (γ + ℓ0)−(t, y)|dy +− +|γ(t, x + h) − γ(t, x)| +� +(γ(t, x + h) − γ(t, x))2 + δ2� 1 +2 · b|(γ + ℓ0)−(t, x + h) − (γ + ℓ0)−(t, x)|, +which yields that +|γ(t, x + h) − γ(t, x)| − |γ0(x + h) − γ0(x)| += +lim +δ→0 [µδ(γ(t, x + h) − γ(t, x)) − µδ(γ0(x + h) − γ0(x))] += +lim +δ→0 +� t +0 +∂ +∂τ µδ(γ(τ, x + h) − γ(τ, x))dτ +≤ +a +� t +0 +�� +Rn k(x − y)|γ+(τ, y + h) − γ+(τ, y)|dy − |γ+(τ, x + h) − γ+(τ, x)| +� +dτ ++b +� t +0 +� +Rn k(x − y)|(γ + ℓ0)−(τ, y + h) − (γ + ℓ0)−(τ, y)|dydτ +−b +� t +0 +|(γ + ℓ0)−(τ, x + h) − (γ + ℓ0)−(τ, x)|dτ. +Thus for any R > 0, +� +BR(0) +|γ(t, x + h) − γ(t, x)| dx − +� +BR(0) +|γ0(x + h) − γ0(x)| dx +≤ +a +� t +0 +� +BR(0) +�� +Rn k(x − y)|γ+(τ, y + h) − γ+(τ, y)|dy − |γ+(τ, x + h) − γ+(τ, x)| +� +dxdτ ++b +� t +0 +� +BR(0) +� +Rn k(x − y)|(γ + ℓ0)−(τ, y + h) − (γ + ℓ0)−(τ, y)|dydxdτ + +15 +−b +� t +0 +� +BR(0) +|(γ + ℓ0)−(τ, x + h) − (γ + ℓ0)−(τ, x)|dxdτ +≤ +a +� t +0 +�� +Rn |γ+(τ, x + h) − γ+(τ, x)|dx − +� +BR(0) +|γ+(τ, x + h) − γ+(τ, x)|dx +� +dτ ++b +� t +0 +� +Rn |(γ + ℓ0)−(τ, x + h) − (γ + ℓ0)−(τ, x)|dxdτ +−b +� t +0 +� +BR(0) +|(γ + ℓ0)−(τ, x + h) − (γ + ℓ0)−(τ, x)|dxdτ. +Notice that γ0(x + h) − γ0(x) is compactly supported. Then thanks to Lemma 2.3, by letting +R → ∞, we have for t > 0, +� +Rn |γ(t, x + h) − γ(t, x)| dx ≤ +� +Rn |γ0(x + h) − γ0(x)| dx. +The proof is complete. +Remark 2.1. The priori estimates in Lemmas 2.3 and 2.4 are also valid for the nonlocal +version of the one-phase Stefan problem based on the same arguments. In particular, these +estimates play an important role in proving convergence relations between local and nonlocal +Stefan problems. +3 +Convergence to the local Stefan problem +3.1 +Convergence to the two-phase Stefan problem +Theorem 1.3 is about the convergence relations between local and nonlocal two-phase Stefan +problems, where the additional assumptions that the kernel functions are radially symmetric +and compactly supported are required. +Proof of Theorem 1.3. Fix T > 0. For any test function ζ ∈ C∞ +c (Rn × [0, T)), it is routine to +show that +− +� T +0 +� +Rn γǫζtdxdt − +� +Rn γ0(x)ζ(0, x)dx +=a +� T +0 +� +Rn +1 +ǫ2 +�� +Rn kǫ(x − y)ζ(t, y)dy − ζ(t, x) +� +γ+ +ǫ (t, x)dxdt ++ b +� T +0 +� +Rn +1 +ǫ2 +�� +Rn ηǫ(x − y)ζ(t, y)dy − ζ(t, x) +� +(γǫ(t, x) + ℓ0)−dxdt. 
3 Convergence to the local Stefan problem

3.1 Convergence to the two-phase Stefan problem

Theorem 1.3 is about the convergence relations between the local and nonlocal two-phase Stefan problems, where the additional assumptions that the kernel functions are radially symmetric and compactly supported are required.

Proof of Theorem 1.3. Fix $T>0$. For any test function $\zeta\in C_c^\infty(\mathbb{R}^n\times[0,T))$, it is routine to show that
\[
\begin{aligned}
-\int_0^T\!\!\int_{\mathbb{R}^n}\gamma_\epsilon\,\zeta_t\,dx\,dt-\int_{\mathbb{R}^n}\gamma_0(x)\zeta(0,x)\,dx
&=a\int_0^T\!\!\int_{\mathbb{R}^n}\frac{1}{\epsilon^2}\Big(\int_{\mathbb{R}^n}k_\epsilon(x-y)\zeta(t,y)\,dy-\zeta(t,x)\Big)\gamma_\epsilon^+(t,x)\,dx\,dt\\
&\quad+b\int_0^T\!\!\int_{\mathbb{R}^n}\frac{1}{\epsilon^2}\Big(\int_{\mathbb{R}^n}\eta_\epsilon(x-y)\zeta(t,y)\,dy-\zeta(t,x)\Big)(\gamma_\epsilon(t,x)+\ell_0)^-\,dx\,dt .
\end{aligned}
\tag{3.1}
\]
First, thanks to the conditions imposed on the kernel functions $k$ and $\eta$, and $\zeta\in C_c^\infty(\mathbb{R}^n\times[0,T))$, we have
\[
\lim_{\epsilon\to0}\frac{1}{\epsilon^2}\Big(\int_{\mathbb{R}^n}k_\epsilon(x-y)\zeta(t,y)\,dy-\zeta(t,x)\Big)
=\lim_{\epsilon\to0}\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k(z)\big(\zeta(t,x-\epsilon z)-\zeta(t,x)\big)\,dz
=\frac12\int_{\mathbb{R}^n}|z|^2k(z)\,dz\;\Delta\zeta(t,x)
\tag{3.2}
\]
uniformly in $t\in[0,T)$, $x\in\mathbb{R}^n$, and similarly,
\[
\lim_{\epsilon\to0}\frac{1}{\epsilon^2}\Big(\int_{\mathbb{R}^n}\eta_\epsilon(x-y)\zeta(t,y)\,dy-\zeta(t,x)\Big)=\frac12\int_{\mathbb{R}^n}|z|^2\eta(z)\,dz\;\Delta\zeta
\tag{3.3}
\]
uniformly in $t\in[0,T)$, $x\in\mathbb{R}^n$.

Now we focus on the convergence properties of $\gamma_\epsilon$ as $\epsilon$ goes to zero. According to the Fréchet–Kolmogorov theorem and Lemmas 2.3 and 2.4, for any fixed $t\in(0,T)$ and bounded set $\Omega\subseteq\mathbb{R}^n$, $\{\gamma_\epsilon(t,\cdot)\mid0<\epsilon<1\}$ is precompact in $L^1(\Omega)$. Then it is routine to derive that there exist a sequence $\{\epsilon_j\}$ with $\lim_{j\to\infty}\epsilon_j=0$ and $\gamma(t,\cdot)\in L^1(\mathbb{R}^n)$ for $t\in(0,T)\cap\mathbb{Q}$, such that for any $t\in(0,T)\cap\mathbb{Q}$,
\[
\lim_{j\to\infty}\gamma_{\epsilon_j}(t,\cdot)=\gamma(t,\cdot)\quad\text{in }L^1_{\rm loc}(\mathbb{R}^n).
\tag{3.4}
\]
Due to the uniqueness of weak limits, this implies that for any $1<p<+\infty$,
\[
\lim_{j\to\infty}\gamma_{\epsilon_j}(t,\cdot)=\gamma(t,\cdot)\quad\text{weakly in }L^p_{\rm loc}(\mathbb{R}^n).
\]
Thanks to Theorem 1.1, we have $\|\gamma(t,\cdot)\|_{L^\infty(\Omega)}\le\|\gamma_0\|_{L^\infty(\mathbb{R}^n)}$, i.e., $\gamma(t,\cdot)\in L^\infty(\mathbb{R}^n)$ for $t\in(0,T)\cap\mathbb{Q}$.

Next, we claim that there exists $\gamma(t,\cdot)\in L^\infty(\mathbb{R}^n)$ for $t\in(0,T)\cap\mathbb{Q}^c$ such that for any $1<p<+\infty$ and $t\in(0,T)\cap\mathbb{Q}^c$,
\[
\lim_{j\to\infty}\gamma_{\epsilon_j}(t,\cdot)=\gamma(t,\cdot)\quad\text{weakly in }L^p_{\rm loc}(\mathbb{R}^n).
\tag{3.5}
\]
To prove this claim, fix $t\in(0,T)\cap\mathbb{Q}^c$, $1<p<\infty$ and a bounded set $\Omega$ in $\mathbb{R}^n$. Obviously, there exist a subsequence of $\{\epsilon_j\}$, denoted by $\{\epsilon_{j_\ell}\}$, and $\gamma_\Omega(t,\cdot)\in L^p(\Omega)$, such that
\[
\lim_{j_\ell\to\infty}\gamma_{\epsilon_{j_\ell}}(t,\cdot)=\gamma_\Omega(t,\cdot)\quad\text{weakly in }L^p(\Omega).
\tag{3.6}
\]
We emphasize that the subsequence $\{\epsilon_{j_\ell}\}$ depends on $t\in(0,T)\cap\mathbb{Q}^c$. Then, for fixed $\varphi(x)\in C_c(\Omega)$,
\[
\begin{aligned}
\int_\Omega\big(\gamma_{\epsilon_j}(t,x)-\gamma_\Omega(t,x)\big)\varphi(x)\,dx
&=\int_\Omega\big(\gamma_{\epsilon_j}(t,x)-\gamma_{\epsilon_{j_\ell}}(t,x)\big)\varphi(x)\,dx+\int_\Omega\big(\gamma_{\epsilon_{j_\ell}}(t,x)-\gamma_\Omega(t,x)\big)\varphi(x)\,dx\\
&=\int_\Omega\big(\gamma_{\epsilon_j}(s,x)-\gamma_{\epsilon_{j_\ell}}(s,x)\big)\varphi(x)\,dx+\int_\Omega\big(\gamma_{\epsilon_{j_\ell}}(t,x)-\gamma_\Omega(t,x)\big)\varphi(x)\,dx\\
&\quad+\int_\Omega\big(\gamma_{\epsilon_j}(t,x)-\gamma_{\epsilon_j}(s,x)\big)\varphi(x)\,dx-\int_\Omega\big(\gamma_{\epsilon_{j_\ell}}(t,x)-\gamma_{\epsilon_{j_\ell}}(s,x)\big)\varphi(x)\,dx,
\end{aligned}
\tag{3.7}
\]
where $s\in(0,T)\cap\mathbb{Q}$. Notice that, based on the problem (1.10), one has
\[
\begin{aligned}
\int_{\mathbb{R}^n}\big(\gamma_\epsilon(t,x)-\gamma_\epsilon(s,x)\big)\varphi(x)\,dx
&=a\int_s^t\!\!\int_{\mathbb{R}^n}\frac{1}{\epsilon^2}\Big(\int_{\mathbb{R}^n}k_\epsilon(x-y)\varphi(y)\,dy-\varphi(x)\Big)\gamma_\epsilon^+(\tau,x)\,dx\,d\tau\\
&\quad+b\int_s^t\!\!\int_{\mathbb{R}^n}\frac{1}{\epsilon^2}\Big(\int_{\mathbb{R}^n}\eta_\epsilon(x-y)\varphi(y)\,dy-\varphi(x)\Big)(\gamma_\epsilon(\tau,x)+\ell_0)^-\,dx\,d\tau .
\end{aligned}
\]
Thanks to (1.8) and (3.2), there exists a constant $C$, independent of $\epsilon>0$, such that
\[
\Big|\int_{\mathbb{R}^n}\big(\gamma_\epsilon(t,x)-\gamma_\epsilon(s,x)\big)\varphi(x)\,dx\Big|\le C|t-s| .
\]
Thanks to this estimate, we can choose $s\in\mathbb{Q}$ close enough to $t$ to control the last two terms in (3.7). Hence, together with (3.4) and (3.6), it is standard to show that for any $\varphi(x)\in C_c(\Omega)$,
\[
\lim_{j\to\infty}\int_\Omega\big(\gamma_{\epsilon_j}(t,x)-\gamma_\Omega(t,x)\big)\varphi(x)\,dx=0 .
\]
Thus $\lim_{j\to\infty}\gamma_{\epsilon_j}(t,\cdot)=\gamma_\Omega(t,\cdot)$ weakly in $L^p(\Omega)$. Since $1<p<\infty$ is arbitrary, thanks to Theorem 1.1 one sees that $\|\gamma_\Omega(t,\cdot)\|_{L^\infty(\Omega)}\le\|\gamma_0\|_{L^\infty(\mathbb{R}^n)}$. Notice that $\Omega\subseteq\mathbb{R}^n$ is an arbitrary fixed bounded set; thus, due to the uniqueness of weak limits, we can define $\gamma(t,\cdot)\in L^\infty(\mathbb{R}^n)$ by setting $\gamma(t,x)=\gamma_\Omega(t,x)$ a.e. in $\Omega$. It follows that $\lim_{j\to\infty}\gamma_{\epsilon_j}(t,\cdot)=\gamma(t,\cdot)$ weakly in $L^p_{\rm loc}(\mathbb{R}^n)$. Since $t\in(0,T)\cap\mathbb{Q}^c$ is arbitrary, the claim is proved.

Furthermore, we improve the weak convergence in (3.5) to strong convergence in $L^1_{\rm loc}(\mathbb{R}^n)$. For this purpose, fix $t\in(0,T)\cap\mathbb{Q}^c$ and a bounded set $\Omega\subseteq\mathbb{R}^n$. Recall that, due to the Fréchet–Kolmogorov theorem and Lemmas 2.3 and 2.4, $\{\gamma_{\epsilon_j}(t,\cdot)\}$ is precompact in $L^1(\Omega)$.
Thus, thanks to (3.5) and the uniqueness of weak convergence, it is routine to verify that $\lim_{j\to\infty}\gamma_{\epsilon_j}(t,\cdot)=\gamma(t,\cdot)$ in $L^1(\Omega)$, i.e., for any $t\in(0,T)\cap\mathbb{Q}^c$,
\[
\lim_{j\to\infty}\gamma_{\epsilon_j}(t,\cdot)=\gamma(t,\cdot)\quad\text{in }L^1_{\rm loc}(\mathbb{R}^n).
\tag{3.8}
\]
Therefore, by letting $j\to\infty$, it follows from (3.1), (3.2), (3.3), (3.4) and (3.8) that
\[
\int_0^T\!\!\int_{\mathbb{R}^n}\Big[\gamma\,\zeta_t+\big(A\gamma^++B(\gamma+\ell_0)^-\big)\Delta\zeta\Big]dx\,dt+\int_{\mathbb{R}^n}\gamma_0(x)\zeta(0,x)\,dx=0 .
\]
The uniqueness of the generalized solution to the problem (1.12) yields the desired conclusion.
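To make the rescaling limit (3.2) concrete, here is a small one-dimensional numerical check; the test function, the uniform kernel and the evaluation point are assumptions of this sketch rather than choices made in the paper.

```python
import numpy as np

# One-dimensional check (our own toy setup) of the rescaling limit (3.2):
#   (1/eps^2) * ( (k_eps * zeta)(x0) - zeta(x0) )  ->  (1/2) * int z^2 k(z) dz * zeta''(x0).
zeta   = lambda x: np.exp(-x**2)                     # smooth test function
zeta_2 = lambda x: (4.0*x**2 - 2.0) * np.exp(-x**2)  # its second derivative
k      = lambda z: 0.5 * (np.abs(z) <= 1.0)          # uniform kernel on [-1,1]: int k = 1, int z^2 k = 1/3

x0 = 0.0
z = np.linspace(-1.0, 1.0, 200001)
dz = z[1] - z[0]
w = np.ones_like(z); w[0] = w[-1] = 0.5              # trapezoid weights
target = 0.5 * (1.0 / 3.0) * zeta_2(x0)
for eps in (0.5, 0.2, 0.1, 0.05):
    val = (np.sum(w * k(z) * zeta(x0 - eps * z)) * dz - zeta(x0)) / eps**2
    print(f"eps={eps:4.2f}   rescaled difference = {val:+.6f}   limit in (3.2) = {target:+.6f}")
```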
3.2 Convergence to the one-phase Stefan problem

This subsection is devoted to the proof of Theorem 1.4, where the convergence relations between local and nonlocal one-phase Stefan problems are verified under the optimal condition (1.9) imposed on the kernel function.

It is known that the classical one-phase problem (1.5) can be reduced to a parabolic variational inequality [11, Chapter 1.9]. To be more specific, define
\[
v(t,x)=\begin{cases}\displaystyle\int_0^t\theta(\tau,x)\,d\tau & x\in\bar\Omega_0,\\[4pt] 0 & x\in\mathbb{R}^n\setminus\bar\Omega_0,\ t\le s(x),\\[4pt] \displaystyle\int_{s(x)}^t\theta(\tau,x)\,d\tau & x\in\mathbb{R}^n\setminus\bar\Omega_0,\ t>s(x),\end{cases}
\]
and then transform the problem (1.5) into a variational inequality for the function $v(t,x)$ as follows:
\[
\begin{cases}
v_t-A\Delta v\ge\bar f & \text{a.e. in }(0,T)\times\mathbb{R}^n,\\
v\ge0 & \text{a.e. in }(0,T)\times\mathbb{R}^n,\\
(v_t-A\Delta v-\bar f)v=0 & \text{a.e. in }(0,T)\times\mathbb{R}^n,
\end{cases}
\tag{3.9}
\]
where $\bar f=\gamma_0$ is defined in (1.7). It has been proved that there exists a unique solution of the problem (3.9), still denoted by $v(t,x)$, and $D_xv$, $D_x^2v$, $D_tv$ belong to $L^\infty((0,T);L^p(\mathbb{R}^n))$ for $p<\infty$; see [11, Chapter 1.9] for details.

Borrowing this idea, define
\[
v_\epsilon(t,x)=\int_0^t\gamma_\epsilon^+(\tau,x)\,d\tau .
\tag{3.10}
\]
Obviously, Theorem 1.4 is about the convergence relations between $v_\epsilon$ and $v$.

First we compute the equation satisfied by $v_\epsilon$. For any $x\in\mathbb{R}^n\setminus\bar\Omega_0$, let $s_\epsilon(x)$ denote the time, if it exists, when $\gamma_\epsilon(t,x)$ first reaches zero. Thus
\[
\ell_0=\frac{1}{\epsilon^2}\int_0^{s_\epsilon(x)}\!\!\int_{\mathbb{R}^n}k_\epsilon(x-y)\gamma_\epsilon^+(\tau,y)\,dy\,d\tau .
\tag{3.11}
\]
• If $x\in\bar\Omega_0$, $t>0$, then
\[
\begin{aligned}
v_{\epsilon t}-\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k_\epsilon(x-y)v_\epsilon(t,y)\,dy+\frac{1}{\epsilon^2}v_\epsilon(t,x)
&=\gamma_\epsilon^+(t,x)-\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k_\epsilon(x-y)\int_0^t\gamma_\epsilon^+(\tau,y)\,d\tau\,dy+\frac{1}{\epsilon^2}\int_0^t\gamma_\epsilon^+(\tau,x)\,d\tau\\
&=\int_0^t\gamma_{\epsilon t}^+(\tau,x)\,d\tau+\gamma_\epsilon^+(0,x)-\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k_\epsilon(x-y)\int_0^t\gamma_\epsilon^+(\tau,y)\,d\tau\,dy+\frac{1}{\epsilon^2}\int_0^t\gamma_\epsilon^+(\tau,x)\,d\tau\\
&=\gamma_0(x);
\end{aligned}
\]
• if $x\in\mathbb{R}^n\setminus\bar\Omega_0$, $0<t\le s_\epsilon(x)$, then $v_\epsilon(t,x)=0$, and thus
\[
v_{\epsilon t}-\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k_\epsilon(x-y)v_\epsilon(t,y)\,dy+\frac{1}{\epsilon^2}v_\epsilon(t,x)=-\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k_\epsilon(x-y)v_\epsilon(t,y)\,dy;
\]
• if $x\in\mathbb{R}^n\setminus\bar\Omega_0$, $t>s_\epsilon(x)$, then
\[
\begin{aligned}
v_{\epsilon t}-\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k_\epsilon(x-y)v_\epsilon(t,y)\,dy+\frac{1}{\epsilon^2}v_\epsilon(t,x)
&=\gamma_\epsilon^+(t,x)-\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k_\epsilon(x-y)\int_0^t\gamma_\epsilon^+(\tau,y)\,d\tau\,dy+\frac{1}{\epsilon^2}\int_0^t\gamma_\epsilon^+(\tau,x)\,d\tau\\
&=\int_{s_\epsilon(x)}^t\gamma_{\epsilon t}^+(\tau,x)\,d\tau-\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k_\epsilon(x-y)\int_{s_\epsilon(x)}^t\gamma_\epsilon^+(\tau,y)\,d\tau\,dy+\frac{1}{\epsilon^2}\int_{s_\epsilon(x)}^t\gamma_\epsilon^+(\tau,x)\,d\tau\\
&\quad-\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k_\epsilon(x-y)\int_0^{s_\epsilon(x)}\gamma_\epsilon^+(\tau,y)\,d\tau\,dy\\
&=-\ell_0,
\end{aligned}
\]
according to (3.11).

Hence one sees that $v_\epsilon$ satisfies
\[
\begin{cases}
v_{\epsilon t}(t,x)=\dfrac{1}{\epsilon^2}\displaystyle\int_{\mathbb{R}^n}k_\epsilon(x-y)v_\epsilon(t,y)\,dy-\dfrac{1}{\epsilon^2}v_\epsilon(t,x)+f_\epsilon(t,x) & t>0,\ x\in\mathbb{R}^n,\\
v_\epsilon(0,x)=0 & x\in\mathbb{R}^n,
\end{cases}
\tag{3.12}
\]
where
\[
f_\epsilon(t,x)=\begin{cases}\gamma_0(x) & t>0,\ x\in\bar\Omega_0,\\[2pt] -\displaystyle\int_{\mathbb{R}^n}\frac{1}{\epsilon^2}k_\epsilon(x-y)v_\epsilon(t,y)\,dy & 0<t\le s_\epsilon(x),\ x\in\mathbb{R}^n\setminus\bar\Omega_0,\\[2pt] -\ell_0 & t>s_\epsilon(x),\ x\in\mathbb{R}^n\setminus\bar\Omega_0 .\end{cases}
\]
Secondly, we prepare some useful estimates on $f_\epsilon$.

Lemma 3.1. Assume that in the problem (1.13) the kernel function $k$ satisfies the assumption (K), the initial data satisfies (1.2) and $u_0\ge0$. Then for given $1\le p\le\infty$, $f_\epsilon(t,x)$ is uniformly bounded in $L^p(\mathbb{R}^n)$ for any $\epsilon>0$, $t>0$.

Proof. Similar to the proof of Lemma 2.3, if $\varphi\in L^1(\mathbb{R}^n)\cap L^\infty(\mathbb{R}^n)$, then for any $p>1$, $\varphi\in L^p(\mathbb{R}^n)$ and
\[
\|\varphi\|_{L^p(\mathbb{R}^n)}\le\big(\|\varphi\|_{L^\infty(\mathbb{R}^n)}^{p-1}\|\varphi\|_{L^1(\mathbb{R}^n)}\big)^{1/p}\le\big(\|\varphi\|_{L^\infty(\mathbb{R}^n)}+1\big)\big(\|\varphi\|_{L^1(\mathbb{R}^n)}+1\big).
\]
Hence it suffices to verify the conclusion for $p=1$ and $p=\infty$.

Since $f_\epsilon(t,x)=\gamma_0$ for $x\in\bar\Omega_0$ and $t>0$, we only need to estimate $f_\epsilon$ outside $\bar\Omega_0$. The estimate mainly relies on the following two facts:
\[
-f_\epsilon(t,x)\in[0,\ell_0]\ \text{for }x\in\mathbb{R}^n\setminus\bar\Omega_0,\qquad
\int_{\mathbb{R}^n\setminus\bar\Omega_0}-f_\epsilon(t,x)\,dx\le\int_{\Omega_0}\gamma_0\,dx .
\tag{3.13}
\]
Assume that (3.13) holds. It immediately yields that
\[
\|f_\epsilon(t,\cdot)\|_{L^\infty(\mathbb{R}^n)}\le\max\big\{\|\gamma_0|_{\bar\Omega_0}\|_{L^\infty(\Omega_0)},\ \ell_0\big\},\qquad
\|f_\epsilon(t,\cdot)\|_{L^1(\mathbb{R}^n)}\le2\int_{\Omega_0}\gamma_0\,dx,
\]
and the desired conclusion follows.

Now it remains to verify (3.13). In fact, due to (3.11), the first estimate in (3.13) is obvious. Intuitively, the second estimate in (3.13) says that $\int_{\mathbb{R}^n\setminus\bar\Omega_0}-f_\epsilon(t,x)\,dx$ is no more than the total energy absorbed outside $\bar\Omega_0$ from time $0$ to $t$, which cannot exceed the total energy at the initial time, i.e. $\int_{\Omega_0}\gamma_0\,dx$. To make this precise, for large $R>0$ one integrates $\gamma_\epsilon(t,x)-\gamma_\epsilon(0,x)$ over $B_R(0)\setminus\bar\Omega_0$ using the equation in (1.13), splits this region according to whether $s_\epsilon(x)<t$, and lets $R\to\infty$; this gives the second estimate in (3.13). The proof is complete.

Lemma 3.2. For any $t>0$, there exist a sequence $\{\epsilon_\ell\}$, which depends on $t$ and satisfies $\lim_{\ell\to\infty}\epsilon_\ell=0$, and $\tilde v_t\in L^1(\mathbb{R}^n)$ such that $v_{\epsilon_\ell}(t,\cdot)\to\tilde v_t(\cdot)$ a.e. in $\mathbb{R}^n$.

Proof. Thanks to Lemma 2.4,
\[
\begin{aligned}
\int_{\mathbb{R}^n}|v_\epsilon(t,x+h)-v_\epsilon(t,x)|\,dx
&=\int_{\mathbb{R}^n}\Big|\int_0^t\big(\gamma_\epsilon^+(\tau,x+h)-\gamma_\epsilon^+(\tau,x)\big)d\tau\Big|\,dx\\
&\le\int_0^t\int_{\mathbb{R}^n}|\gamma_\epsilon(\tau,x+h)-\gamma_\epsilon(\tau,x)|\,dx\,d\tau\\
&\le\int_0^t\int_{\mathbb{R}^n}|\gamma_0(x+h)-\gamma_0(x)|\,dx\,d\tau=t\int_{\mathbb{R}^n}|\gamma_0(x+h)-\gamma_0(x)|\,dx .
\end{aligned}
\]
This, together with the Fréchet–Kolmogorov theorem and Lemma 2.3, indicates that for any fixed $t>0$ and bounded set $\Omega\subseteq\mathbb{R}^n$, $\{v_\epsilon(t,\cdot)\mid0<\epsilon<1\}$ is precompact in $L^1(\Omega)$. Then it is easy to show that there exist a sequence $\{\epsilon_\ell\}$ with $\lim_{\ell\to\infty}\epsilon_\ell=0$ and $\tilde v_t\in L^1(\mathbb{R}^n)$ such that $v_{\epsilon_\ell}(t,\cdot)\to\tilde v_t(\cdot)$ in $L^1_{\rm loc}(\mathbb{R}^n)$ and $v_{\epsilon_\ell}(t,\cdot)\to\tilde v_t(\cdot)$ a.e. in $\mathbb{R}^n$.

We emphasize that so far the additional condition (1.9) has not been used. After these preparations, we are ready to complete the proof of Theorem 1.4.

Proof of Theorem 1.4. From now on, fix $T>0$. Going back to the problem (3.12) satisfied by $v_\epsilon$, by the Fourier transform and the property $\hat k_\epsilon(\xi)=\hat k(\epsilon\xi)$, we derive that
\[
\hat v_\epsilon(t,\xi)=\int_0^te^{\frac{1}{\epsilon^2}(\hat k(\epsilon\xi)-1)(t-\tau)}\hat f_\epsilon(\tau,\xi)\,d\tau .
\tag{3.15}
\]
Due to the Parseval formula, $\|f_\epsilon(t,\cdot)\|_{L^2(\mathbb{R}^n)}=\|\hat f_\epsilon(t,\cdot)\|_{L^2(\mathbb{R}^n)}$. Then, thanks to Lemma 3.1, there exist a sequence $\{\epsilon_j\}$ with $\lim_{j\to\infty}\epsilon_j=0$ and $f_0,G_0\in L^2((0,T)\times\mathbb{R}^n)$ such that
\[
\lim_{j\to\infty}f_{\epsilon_j}=f_0\quad\text{weakly in }L^2((0,T)\times\mathbb{R}^n)
\tag{3.16}
\]
and
\[
\lim_{j\to\infty}\hat f_{\epsilon_j}=G_0\quad\text{weakly in }L^2((0,T)\times\mathbb{R}^n).
\tag{3.17}
\]
Notice that for any test function $\psi(t,\xi)\in C_c((0,T)\times\mathbb{R}^n)$, on the one hand, due to (3.16),
\[
\begin{aligned}
\lim_{j\to\infty}\int_0^T\!\!\int_{\mathbb{R}^n}\hat f_{\epsilon_j}(t,\xi)\psi(t,\xi)\,d\xi\,dt
&=\lim_{j\to\infty}\int_0^T\!\!\int_{\mathbb{R}^n}\Big(\int_{\mathbb{R}^n}e^{-ix\cdot\xi}f_{\epsilon_j}(t,x)\,dx\Big)\psi(t,\xi)\,d\xi\,dt\\
&=\lim_{j\to\infty}\int_0^T\!\!\int_{\mathbb{R}^n}\Big(\int_{\mathbb{R}^n}e^{-ix\cdot\xi}\psi(t,\xi)\,d\xi\Big)f_{\epsilon_j}(t,x)\,dx\,dt\\
&=\int_0^T\!\!\int_{\mathbb{R}^n}\Big(\int_{\mathbb{R}^n}e^{-ix\cdot\xi}\psi(t,\xi)\,d\xi\Big)f_0(t,x)\,dx\,dt=\int_0^T\!\!\int_{\mathbb{R}^n}\hat f_0(t,\xi)\psi(t,\xi)\,d\xi\,dt .
\end{aligned}
\]
On the other hand, (3.17) yields that
\[
\lim_{j\to\infty}\int_0^T\!\!\int_{\mathbb{R}^n}\hat f_{\epsilon_j}(t,\xi)\psi(t,\xi)\,d\xi\,dt=\int_0^T\!\!\int_{\mathbb{R}^n}G_0(t,\xi)\psi(t,\xi)\,d\xi\,dt .
\]
Hence $G_0(t,\xi)=\hat f_0$ a.e. in $(0,T)\times\mathbb{R}^n$, i.e.
\[
\lim_{j\to\infty}f_{\epsilon_j}=f_0,\qquad\lim_{j\to\infty}\hat f_{\epsilon_j}=\hat f_0\quad\text{weakly in }L^2((0,T)\times\mathbb{R}^n).
\tag{3.18}
\]
Introduce the problem
\[
\begin{cases}
v_t=A\Delta v+f_0 & 0<t\le T,\ x\in\mathbb{R}^n,\\
v(0,x)=0 & x\in\mathbb{R}^n,
\end{cases}
\tag{3.19}
\]
and let $v^*$ denote its unique generalized solution in $V_2^{1,1/2}(\mathbb{R}^n\times[0,T])$ [12, Chapter III.5]. By applying the Fourier transform to the problem (3.19), we derive that
\[
\hat v^*(t,\xi)=\int_0^te^{-A|\xi|^2(t-\tau)}\hat f_0(\tau,\xi)\,d\tau .
\]
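The argument that follows compares the exponential symbol in (3.15) with the heat symbol in the formula for $\hat v^*$. As an informal illustration of why assumption (1.9) makes these two symbols close, the sketch below evaluates both for the uniform kernel on $[-1,1]$ (for which $\hat k(\xi)=\sin\xi/\xi$ and, by Proposition 1.2, $A=1/6$); the kernel, time and frequency grid are assumptions of this sketch.

```python
import numpy as np

# Compare exp(t*(k_hat(eps*xi)-1)/eps^2) with exp(-A*t*|xi|^2) for the uniform kernel on [-1,1],
# where k_hat(xi) = sin(xi)/xi and A = (1/2)*int z^2 k(z) dz = 1/6.
def k_hat(xi):
    safe = np.where(xi == 0.0, 1.0, xi)
    return np.where(xi == 0.0, 1.0, np.sin(safe) / safe)

A, t = 1.0 / 6.0, 1.0
xi = np.linspace(-10.0, 10.0, 81)
for eps in (0.5, 0.1, 0.02):
    nonlocal_symbol = np.exp(t * (k_hat(eps * xi) - 1.0) / eps**2)
    local_symbol    = np.exp(-A * t * xi**2)
    print(f"eps={eps:5.2f}   max difference between the two symbols = "
          f"{np.max(np.abs(nonlocal_symbol - local_symbol)):.2e}")
```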
Fix $t\in(0,T)$. For any given $\varphi(\xi)\in C_c^\infty(\mathbb{R}^n)$,
\[
\begin{aligned}
\lim_{j\to\infty}\int_{\mathbb{R}^n}\hat v_{\epsilon_j}(t,\xi)\varphi(\xi)\,d\xi
&=\lim_{j\to\infty}\int_{\mathbb{R}^n}\Big(\int_0^te^{\frac{1}{\epsilon_j^2}(\hat k(\epsilon_j\xi)-1)(t-\tau)}\hat f_{\epsilon_j}(\tau,\xi)\,d\tau\Big)\varphi(\xi)\,d\xi\\
&=\lim_{j\to\infty}\int_{\mathbb{R}^n}\int_0^t\Big(e^{\frac{1}{\epsilon_j^2}(\hat k(\epsilon_j\xi)-1)(t-\tau)}-e^{-A|\xi|^2(t-\tau)}\Big)\hat f_{\epsilon_j}(\tau,\xi)\varphi(\xi)\,d\tau\,d\xi
+\lim_{j\to\infty}\int_{\mathbb{R}^n}\int_0^te^{-A|\xi|^2(t-\tau)}\hat f_{\epsilon_j}(\tau,\xi)\varphi(\xi)\,d\tau\,d\xi .
\end{aligned}
\]
Since $\|\hat f_{\epsilon_j}(\tau,\cdot)\|_{L^\infty(\mathbb{R}^n)}\le\|f_{\epsilon_j}(\tau,\cdot)\|_{L^1(\mathbb{R}^n)}$, due to Lemma 3.1, the assumption (1.9) and (3.18), we have
\[
\lim_{j\to\infty}\int_{\mathbb{R}^n}\hat v_{\epsilon_j}(t,\xi)\varphi(\xi)\,d\xi=\int_{\mathbb{R}^n}\int_0^te^{-A|\xi|^2(t-\tau)}\hat f_0(\tau,\xi)\varphi(\xi)\,d\tau\,d\xi=\int_{\mathbb{R}^n}\hat v^*(t,\xi)\varphi(\xi)\,d\xi .
\tag{3.20}
\]
Moreover, thanks to Lemma 2.3, there exist a subsequence of $\{\epsilon_j\}$, denoted by $\{\epsilon_{j_\ell}\}$, and $v_0^t\in L^2(\mathbb{R}^n)$, such that $v_{\epsilon_{j_\ell}}(t,\cdot)\rightharpoonup v_0^t(\cdot)$ in $L^2(\mathbb{R}^n)$. Then for any given $\varphi(\xi)\in C_c^\infty(\mathbb{R}^n)$,
\[
\begin{aligned}
\lim_{j_\ell\to\infty}\int_{\mathbb{R}^n}\hat v_{\epsilon_{j_\ell}}(t,\xi)\varphi(\xi)\,d\xi
&=\lim_{j_\ell\to\infty}\int_{\mathbb{R}^n}\Big(\int_{\mathbb{R}^n}e^{-ix\cdot\xi}v_{\epsilon_{j_\ell}}(t,x)\,dx\Big)\varphi(\xi)\,d\xi\\
&=\lim_{j_\ell\to\infty}\int_{\mathbb{R}^n}\Big(\int_{\mathbb{R}^n}e^{-ix\cdot\xi}\varphi(\xi)\,d\xi\Big)v_{\epsilon_{j_\ell}}(t,x)\,dx
=\int_{\mathbb{R}^n}\Big(\int_{\mathbb{R}^n}e^{-ix\cdot\xi}\varphi(\xi)\,d\xi\Big)v_0^t(x)\,dx\\
&=\int_{\mathbb{R}^n}\Big(\int_{\mathbb{R}^n}e^{-ix\cdot\xi}v_0^t(x)\,dx\Big)\varphi(\xi)\,d\xi .
\end{aligned}
\tag{3.21}
\]
Now (3.20) and (3.21) imply that
\[
\hat v^*(t,\xi)=\int_{\mathbb{R}^n}e^{-ix\cdot\xi}v_0^t(x)\,dx\quad\text{a.e. in }\mathbb{R}^n .
\]
Thus $v^*(t,x)=v_0^t(x)$ a.e. in $\mathbb{R}^n$. Since $v^*(t,x)$ is the unique solution to the problem (3.19), it follows immediately that for any $0<t<T$, $v_\epsilon(t,\cdot)\rightharpoonup v^*(t,\cdot)$ in $L^2(\mathbb{R}^n)$ as $\epsilon\to0$. This, together with Lemma 3.2, implies that
\[
v_\epsilon(t,x)\to v^*(t,x)\quad\text{a.e. in }(0,T)\times\mathbb{R}^n\ \text{as }\epsilon\to0 .
\tag{3.22}
\]
To complete the proof of Theorem 1.4, it remains to verify that $v^*$ satisfies the parabolic variational inequality (3.9), namely
\[
\begin{cases}
v_t-A\Delta v\ge\bar f & \text{a.e. in }(0,T)\times\mathbb{R}^n,\\
v\ge0 & \text{a.e. in }(0,T)\times\mathbb{R}^n,\\
(v_t-A\Delta v-\bar f)v=0 & \text{a.e. in }(0,T)\times\mathbb{R}^n,
\end{cases}
\]
where $\bar f=\gamma_0$ for $x\in\mathbb{R}^n$. Obviously $v^*\ge0$ satisfies the first two inequalities in (3.9), since $v_\epsilon$ is always non-negative and $f_\epsilon\ge\bar f$ for all $t>0$ and $x\in\mathbb{R}^n$. Moreover, thanks to Lemma 3.1, (3.16) and the uniqueness of weak limits, it is standard to show that $f_0\in L^p(\mathbb{R}^n\times[0,T])$ for any $p>1$. Then, by parabolic regularity theory and the Sobolev embedding theorem, one obtains that $v^*(t,\cdot)$ is continuous in $\mathbb{R}^n$. Thus the set $\{v^*>0\}$ is open in $(0,T)\times\mathbb{R}^n$. Also notice that $f_\epsilon=\bar f$ if $v_\epsilon>0$. Hence, thanks to (3.22), it is standard to verify that
\[
f_\epsilon(t,x)\to\bar f(t,x)\quad\text{a.e. in }\{v^*>0\}\ \text{as }\epsilon\to0 .
\]
Thus, due to (3.18), $f_0=\bar f$ a.e. in $\{v^*>0\}$, i.e., $v^*$ satisfies the third equality in (3.9). The proof of Theorem 1.4 is complete.

4 Fundamental properties of the nonlocal Stefan problem

In this section, we investigate fundamental properties of the nonlocal version of the one-phase Stefan problem (1.6):
\[
\begin{cases}
\gamma_t(t,x)=d\displaystyle\int_{\mathbb{R}^n}k(x-y)\gamma^+(t,y)\,dy-d\gamma^+(t,x) & t>0,\ x\in\mathbb{R}^n,\\
\gamma(0,x)=\gamma_0 & x\in\mathbb{R}^n .
\end{cases}
\]

4.1 Expansion and boundedness

Theorem 1.5(i) is about the expansion of $\Omega(t)$.

Proof of Theorem 1.5(i). Fix $x\in\mathbb{R}^n\setminus\bar\Omega_0$. Let $t=s(x)$ denote the moment when $\gamma(s(x),x)=0$ while $\gamma(t,x)<0$ for $0<t<s(x)$. By (1.6), one has
\[
\ell_0=d\int_0^{s(x)}\!\!\int_{\mathbb{R}^n}k(x-y)\gamma^+(\tau,y)\,dy\,d\tau=d\int_0^{s(x)}\!\!\int_{\Omega(\tau)}k(x-y)\gamma^+(\tau,y)\,dy\,d\tau .
\tag{4.1}
\]
Also, thanks to Theorem 1.1, $0\le\gamma^+\le\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}$. This yields that
\[
\ell_0\le d\int_0^{s(x)}\!\!\int_{\Omega(\tau)}k(x-y)\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}\,dy\,d\tau\le d\,s(x)\,\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)},
\]
i.e. $s(x)\ge\ell_0/\big(d\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}\big)$. Hence, by choosing $t_0<\ell_0/\big(d\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}\big)$, one has $\Omega(t)=\Omega(0)$ for $0\le t\le t_0$. The rest follows directly from Proposition 2.2.
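The waiting-time bound $s(x)\ge\ell_0/(d\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)})$ just derived is easy to observe numerically. The following sketch, whose discretization, kernel and data are chosen by us purely for illustration, time-steps a one-dimensional version of (1.6) and reports when the positivity set first grows.

```python
import numpy as np

# Illustrative 1-D time-stepping of (1.6); all numerical choices below are ours, not the paper's.
L, n, d, ell0, c0, dt = 8.0, 320, 1.0, 1.0, 2.0, 0.002
x = np.linspace(-L, L, n); dx = x[1] - x[0]
K = dx * 0.5 * np.exp(-np.abs(x[:, None] - x[None, :]))        # k(z) = exp(-|z|)/2, int k = 1
gamma = np.where(np.abs(x) <= 1.0, c0, -ell0)                  # Omega(0) = [-1, 1]
outside = np.abs(x) > 1.0

t, first_move = 0.0, None
while t < 10.0 and first_move is None:
    gp = np.maximum(gamma, 0.0)
    gamma = gamma + dt * d * (K @ gp - gp)                     # explicit Euler step
    t += dt
    if np.any(gamma[outside] >= 0.0):
        first_move = t

print("first time Omega(t) grows   :", first_move)
print("lower bound ell0/(d*max g0) :", ell0 / (d * c0))
```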
In the following we prove Theorem 1.5(ii), which is about the uniform boundedness of $\Omega(t)$.

Proof of Theorem 1.5(ii). The proof is lengthy. To begin with, we introduce the first auxiliary one-dimensional problem
\[
\begin{cases}
\gamma_t(t,x_1)=d\displaystyle\int_{\mathbb{R}}k_1(x_1-y_1)\gamma^+(t,y_1)\,dy_1-d\gamma^+(t,x_1) & t>0,\ x_1\in\mathbb{R},\\
\gamma(0,x_1)=\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)} & 0\le x_1\le M,\\
\gamma(0,x_1)=-\ell_0 & x_1<0\ \text{or}\ x_1>M,
\end{cases}
\tag{4.2}
\]
where $k_1(x_1)=\int_{\mathbb{R}^{n-1}}k(x_1,x')\,dx'$, $x'=(x_2,\dots,x_n)$, and the constant $M$ is chosen such that
\[
\bar\Omega_0\subseteq\{x\in\mathbb{R}^n\mid0<x_1<M\},\qquad x=(x_1,\dots,x_n).
\]
Such an $M$ exists since $\bar\Omega_0$ is bounded. Let $\gamma_1(t,x_1)$ denote the solution to the problem (4.2). Notice that $\gamma_1(t,x_1)$ also satisfies the $n$-dimensional problem (1.6) with initial data
\[
\gamma_0(x)=\begin{cases}\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)} & 0\le x_1\le M,\\ -\ell_0 & x_1<0\ \text{or}\ x_1>M,\end{cases}\qquad x=(x_1,\dots,x_n).
\]
Denote
\[
\Sigma_1(t)=\{x_1\in\mathbb{R}\mid\gamma_1(t,x_1)\ge0\}\quad\text{and}\quad\Sigma_1^\infty=\bigcup_{t\ge0}\Sigma_1(t).
\]
By Proposition 2.1, $\gamma_1(t,x_1)\ge\gamma(t,x)$ in $\mathbb{R}^n$, where $\gamma$ denotes the solution to the $n$-dimensional problem (1.6) with initial data (1.7) and $x=(x_1,\dots,x_n)$.

To prove Theorem 1.5(ii), it suffices to show that $\Sigma_1^\infty$ is bounded, since the other $n-1$ directions can be handled similarly and thus $\Omega(t)$ will be confined to a bounded cube.

We first show that $|\Sigma_1^\infty|$ is bounded. Thanks to Lemma 2.3, $\gamma_1^+(t,\cdot)\in L^1(\mathbb{R})$. By direct computation, for $0<t<T$,
\[
\begin{aligned}
\int_{\Sigma_1(T)}\gamma_{1t}(t,x_1)\,dx_1
&=d\int_{\Sigma_1(T)}\int_{\mathbb{R}}k_1(x_1-y_1)\gamma_1^+(t,y_1)\,dy_1\,dx_1-d\int_{\Sigma_1(T)}\gamma_1^+(t,x_1)\,dx_1\\
&\le d\int_{\mathbb{R}}\gamma_1^+(t,y_1)\,dy_1-d\int_{\Sigma_1(T)}\gamma_1^+(t,x_1)\,dx_1=0 .
\end{aligned}
\]
Thus
\[
0\le\int_{\Sigma_1(T)}\gamma_1(T,x_1)\,dx_1\le\int_{\Sigma_1(T)}\gamma_1(0,x_1)\,dx_1=-\ell_0\,|\Sigma_1(T)\setminus[0,M]|+\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}M,
\]
which implies that
\[
|\Sigma_1(T)|\le\Big(1+\frac{\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}}{\ell_0}\Big)M .
\]
Since $T$ is arbitrary, one has
\[
|\Sigma_1^\infty|\le\Big(1+\frac{\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}}{\ell_0}\Big)M .
\tag{4.3}
\]
Next we show that $\gamma_1(t,x_1)$ decays exponentially as $t$ goes to infinity. For this purpose, we introduce the second auxiliary one-dimensional problem with periodic initial data
\[
\begin{cases}
\gamma_t=d\displaystyle\int_{\mathbb{R}}k_1(x_1-y_1)\gamma^+(t,y_1)\,dy_1-d\gamma^+(t,x_1) & t>0,\ x_1\in\mathbb{R},\\
\gamma(0,x_1)=\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)} & \kappa(M+L)\le x_1\le\kappa(M+L)+M,\\
\gamma(0,x_1)=-\ell_0 & \kappa(M+L)+M<x_1<(\kappa+1)(M+L),
\end{cases}
\]
where $\kappa\in\mathbb{Z}$ and $L>0$ is a constant to be determined later, and let $\tilde\gamma_1(t,x_1)$ denote its solution. By Proposition 2.1,
\[
\gamma_1(t,x_1)\le\tilde\gamma_1(t,x_1)\quad\text{for }t>0,\ x_1\in\mathbb{R}.
\tag{4.4}
\]
Obviously, $\tilde\gamma_1(t,x_1)$ is periodic in $x_1$ with period $M+L$. Thus this problem can be rewritten as
\[
\begin{cases}
\gamma_t=d\displaystyle\int_0^{M+L}k^*(x_1-y_1)\gamma^+(t,y_1)\,dy_1-d\gamma^+(t,x_1) & t>0,\ x_1\in(0,M+L),\\
\gamma(0,x_1)=\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)} & 0\le x_1\le M,\\
\gamma(0,x_1)=-\ell_0<0 & M<x_1<M+L,
\end{cases}
\tag{4.5}
\]
where
\[
k^*(x_1)=\sum_{\kappa\in\mathbb{Z}}k_1\big(x_1+\kappa(M+L)\big)\quad\text{and}\quad\int_0^{M+L}k^*(x_1)\,dx_1=1 .
\]
Denote
\[
\tilde\Sigma_1(t)=\{x_1\in\mathbb{R}\mid\tilde\gamma_1(t,x_1)\ge0\}\quad\text{and}\quad\tilde\Sigma_1^\infty=\bigcup_{t\ge0}\tilde\Sigma_1(t).
\]
We claim that if $L>\frac{\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}}{\ell_0}M$, then $|\tilde\Sigma_1^\infty\cap(0,M+L)|<M+L$.

In (4.5), fix $T>0$; by direct computation, one has for $0<t<T$,
\[
\begin{aligned}
\int_{\tilde\Sigma_1(T)\cap(0,M+L)}\tilde\gamma_{1t}(t,x_1)\,dx_1
&=d\int_{\tilde\Sigma_1(T)\cap(0,M+L)}\int_0^{M+L}k^*(x_1-y_1)\tilde\gamma_1^+(t,y_1)\,dy_1\,dx_1-d\int_{\tilde\Sigma_1(T)\cap(0,M+L)}\tilde\gamma_1^+(t,x_1)\,dx_1\\
&\le d\int_0^{M+L}\tilde\gamma_1^+(t,y_1)\,dy_1-d\int_{\tilde\Sigma_1(T)\cap(0,M+L)}\tilde\gamma_1^+(t,x_1)\,dx_1=0 .
\end{aligned}
\]
Thus
\[
0\le\int_{\tilde\Sigma_1(T)\cap(0,M+L)}\tilde\gamma_1(T,x_1)\,dx_1\le\int_{\tilde\Sigma_1(T)\cap(0,M+L)}\tilde\gamma_1(0,x_1)\,dx_1
=-\ell_0\,|\tilde\Sigma_1(T)\cap(M,M+L)|+\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}M .
\]
This implies that
\[
|\tilde\Sigma_1(T)\cap(M,M+L)|\le\frac{\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}}{\ell_0}M .
\tag{4.6}
\]
Since $T$ is arbitrary, it is easy to see that $|\tilde\Sigma_1^\infty\cap(0,M+L)|<M+L$ provided that $L>\frac{\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}}{\ell_0}M$. The claim is proved.
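As a quick sanity check of the measure bound (4.3) established above, the following sketch simulates a discretized analogue of the auxiliary problem (4.2) and compares the measured positivity set with the bound; the kernel, the constants and the finite computational window are assumptions of this illustration.

```python
import numpy as np

# Sanity check (our own discretization and constants) of the measure bound (4.3):
#   |Sigma_1(T)| <= (1 + ||gamma_0||_C / ell_0) * M   for an analogue of the auxiliary problem (4.2).
L, n, d, ell0, c0, M = 20.0, 800, 1.0, 0.5, 1.5, 2.0
x = np.linspace(-L, L, n); dx = x[1] - x[0]
K = dx * np.where(np.abs(x[:, None] - x[None, :]) <= 1.0, 0.5, 0.0)   # stand-in for the marginal kernel k_1
gamma = np.where((x >= 0.0) & (x <= M), c0, -ell0)

dt = 0.01
for _ in range(3000):                                # integrate up to T = 30
    gp = np.maximum(gamma, 0.0)
    gamma = gamma + dt * d * (K @ gp - gp)

print("measured |Sigma_1(T)|     :", round(np.sum(gamma >= 0.0) * dx, 3))
print("bound (1 + c0/ell0) * M   :", (1.0 + c0 / ell0) * M)
```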
Thanks to the strong maximum principle established in Proposition 2.2, $[0,M]\subseteq\tilde\Sigma_1^\infty$ and $\tilde\Sigma_1^\infty$ is open in $(M,M+L)$. Thus, once $L>\frac{\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}}{\ell_0}M$ is fixed, by the previous claim one sees that there exists an open interval $(a,b)\subset(0,M+L)$ satisfying $(a,b)\cap\tilde\Sigma_1^\infty=\emptyset$. If necessary, we may choose $b-a$ smaller such that $k^*(b-a)>0$. Denote
\[
\tilde\Sigma_D=(0,a)\cup(b,M+L)\subseteq\tilde\Sigma_1^\infty .
\]
Under the condition that $k^*(b-a)>0$, the proof of [13, Theorem 2.6(i)] can be slightly modified to show that the eigenvalue problem
\[
d\int_{\tilde\Sigma_D}k^*(x_1-y_1)\varphi(y_1)\,dy_1-d\varphi(x_1)=\lambda\varphi(x_1)\qquad\text{for }x_1\in\tilde\Sigma_D
\]
admits a principal eigenvalue $\lambda_p$ with corresponding eigenfunction $\varphi_p$ satisfying $\varphi_p>0$ in $\tilde\Sigma_D$, and then it is easy to see that $\lambda_p<0$. Moreover, notice that $v(t,x_1)=\ell e^{\lambda_pt}\varphi_p(x_1)$, $\ell>0$, satisfies the problem
\[
\begin{cases}
v_t(t,x_1)=d\displaystyle\int_{\tilde\Sigma_D}k^*(x_1-y_1)v(t,y_1)\,dy_1-dv(t,x_1) & t>0,\ x_1\in\tilde\Sigma_D,\\
v(0,x_1)=\ell\varphi_p(x_1) & x_1\in\tilde\Sigma_D .
\end{cases}
\]
Choose $\ell$ large enough such that $v(0,x_1)>\|\gamma_0|_{\bar\Omega_0}\|_{C(\bar\Omega_0)}$ in $\tilde\Sigma_D$. By the comparison principle, it follows that
\[
\tilde\gamma_1(t,x_1)\le v(t,x_1)=\ell e^{\lambda_pt}\varphi_p(x_1)\qquad\text{for }t>0,\ x_1\in\tilde\Sigma_D .
\]
Therefore, by (4.4), the choice of $\tilde\Sigma_D$ and the fact that $\tilde\gamma_1(t,x_1)$ is periodic in $x_1$ with period $M+L$, we have
\[
\gamma_1(t,x_1)\le\ell e^{\lambda_pt}\|\varphi_p\|_{L^\infty(\tilde\Sigma_D)}\qquad\text{for }t>0,\ x_1\in\mathbb{R},
\tag{4.7}
\]
i.e., $\gamma_1(t,x_1)$ decays exponentially as $t\to\infty$ since $\lambda_p<0$.

Now we are ready to complete the last piece of the proof of Theorem 1.5(ii). Suppose that $\Sigma_1^\infty$ is unbounded, i.e., there exist a sequence $\{x_{1i}\}_{i\ge1}\subseteq\Sigma_1^\infty$ and $\{s_{1i}\}_{i\ge1}$ with $|x_{1i}|\to\infty$ as $i\to\infty$ such that
\[
\ell_0=d\int_0^{s_{1i}}\!\!\int_{\mathbb{R}}k_1(x_{1i}-y_1)\gamma_1^+(\tau,y_1)\,dy_1\,d\tau,
\]
where $t=s_{1i}$ denotes the moment when $\gamma_1(s_{1i},x_{1i})=0$ while $\gamma_1(t,x_{1i})<0$ for $0<t<s_{1i}$. To derive a contradiction, we need the following property:
\[
\lim_{|x_1|\to\infty}\int_{\Sigma_1^\infty}k_1(x_1-y_1)\,dy_1=0,
\tag{4.8}
\]
which follows from the facts that $k_1\in L^1(\mathbb{R})$ and $|\Sigma_1^\infty|<+\infty$ due to (4.3).

Thanks to (4.7) and (4.8), it follows that
\[
d\int_0^\infty\!\!\int_{\mathbb{R}}k_1(x_{1i}-y_1)\gamma_1^+(\tau,y_1)\,dy_1\,d\tau
\le d\int_0^\infty\!\!\int_{\Sigma_1(\tau)}k_1(x_{1i}-y_1)\,\ell e^{\lambda_p\tau}\|\varphi_p\|_{L^\infty(\tilde\Sigma_D)}\,dy_1\,d\tau
\le\frac{d\ell}{-\lambda_p}\|\varphi_p\|_{L^\infty(\tilde\Sigma_D)}\int_{\Sigma_1^\infty}k_1(x_{1i}-y_1)\,dy_1\to0
\]
as $i\to\infty$. This contradicts the existence of $s_{1i}$ when $i$ is large enough. Therefore $\Sigma_1^\infty$ is bounded and the desired conclusion follows.
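To give a feel for the principal eigenvalue $\lambda_p$ used in the proof above, the following sketch discretizes the truncated operator $\varphi\mapsto d\big(\int_{\tilde\Sigma_D}k^*(\cdot-y_1)\varphi(y_1)\,dy_1-\varphi\big)$ on a two-component domain and computes its largest eigenvalue. A uniform kernel is used as a stand-in for the periodized kernel $k^*$, and all numbers are our own illustrative choices.

```python
import numpy as np

# Illustrative discretization of the truncated eigenvalue problem used in the proof:
#   d * ( int_D k(x1-y1) phi(y1) dy1 - phi(x1) ) = lambda * phi(x1)   on  D = (0,a) U (b, M+L).
d, a, b, length = 1.0, 2.0, 2.6, 5.0
x = np.linspace(0.0, length, 600); dx = x[1] - x[0]
xD = x[(x < a) | (x > b)]                               # two-component domain with the gap (a,b) removed
k = lambda z: 0.5 * (np.abs(z) <= 1.0)                  # stand-in kernel with k(b-a) > 0
Amat = d * (k(xD[:, None] - xD[None, :]) * dx - np.eye(xD.size))

vals, vecs = np.linalg.eig(Amat)
i = np.argmax(vals.real)
phi = vecs[:, i].real
print("principal eigenvalue lambda_p =", round(vals[i].real, 4), " (expected negative)")
print("principal eigenfunction one-signed:", bool(np.all(phi > 0) or np.all(phi < 0)))
```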
4.2 Continuous expansion and jumping phenomena

We first verify Theorem 1.6, which is about the continuous expansion of $\Omega(t)$ under the extra conditions that the initial domain $\bar\Omega_0$ is convex and the kernel function $k$ satisfies (K1).

Proof of Theorem 1.6. Suppose that $\Omega(t)$ first jumps at time $t=T$, i.e., $\Omega(t)$ is connected for $t<T$ while $\Omega(T)$ is disconnected. Let $\Omega_1(T)$ denote the connected component which contains $\Omega(t)$ for $t<T$. Choose $y_T\in\Omega(T)\setminus\Omega_1(T)$. Since $\Omega(0)=\bar\Omega_0$ is convex, there exists a unique $x_T\in\partial\Omega(0)$ such that
\[
|x_T-y_T|={\rm dist}\{y_T,\Omega(0)\}.
\]
Moreover, there exists $z_T$ which lies on the line segment $x_Ty_T$ and satisfies $z_T\notin\Omega(T)$. Let $\ell$ denote the hyperplane which passes through $(z_T+y_T)/2$ and is perpendicular to the segment $x_Ty_T$. Without loss of generality, assume that $\ell=\{x\in\mathbb{R}^n\mid x_1=0\}$, where $x=(x_1,x_2,\dots,x_n)$, and $x_{T1}<0$, where $x_T=(x_{T1},x_{T2},\dots,x_{Tn})$. Since $\Omega(0)$ is convex, obviously ${\rm dist}\{\ell,\Omega(0)\}>0$.

For simplicity, denote
\[
\mathbb{R}^n_-=\{x\in\mathbb{R}^n\mid x_1<0\},\qquad\mathbb{R}^n_+=\{x\in\mathbb{R}^n\mid x_1>0\},\qquad\tilde x=(-x_1,x_2,\dots,x_n),
\]
and set $w(t,x)=\gamma(t,x)-\gamma(t,\tilde x)$, $x\in\mathbb{R}^n_-$. Then $y_T=\tilde z_T$ and
\[
w(T,z_T)=\gamma(T,z_T)-\gamma(T,y_T)<0 .
\tag{4.9}
\]
Next, it is standard to compute that for $x\in\mathbb{R}^n_-$,
\[
\begin{aligned}
w_t(t,x)&=\gamma_t(t,x)-\gamma_t(t,\tilde x)\\
&=d\int_{\mathbb{R}^n}k(x-y)\gamma^+(t,y)\,dy-d\int_{\mathbb{R}^n}k(\tilde x-y)\gamma^+(t,y)\,dy-d\gamma^+(t,x)+d\gamma^+(t,\tilde x)\\
&=d\int_{\mathbb{R}^n_-}k(x-y)\gamma^+(t,y)\,dy+d\int_{\mathbb{R}^n_+}k(x-y)\gamma^+(t,y)\,dy
-d\int_{\mathbb{R}^n_-}k(\tilde x-y)\gamma^+(t,y)\,dy-d\int_{\mathbb{R}^n_+}k(\tilde x-y)\gamma^+(t,y)\,dy-c(t,x)w(t,x)\\
&=d\int_{\mathbb{R}^n_-}k(x-y)\gamma^+(t,y)\,dy+d\int_{\mathbb{R}^n_-}k(x-\tilde y)\gamma^+(t,\tilde y)\,dy
-d\int_{\mathbb{R}^n_-}k(\tilde x-y)\gamma^+(t,y)\,dy-d\int_{\mathbb{R}^n_-}k(\tilde x-\tilde y)\gamma^+(t,\tilde y)\,dy-c(t,x)w(t,x)\\
&=\int_{\mathbb{R}^n_-}\big[k(x-y)-k(\tilde x-y)\big]c(t,y)w(t,y)\,dy-c(t,x)w(t,x),
\end{aligned}
\]
where
\[
c(t,x)=d\,\frac{\gamma^+(t,x)-\gamma^+(t,\tilde x)}{\gamma(t,x)-\gamma(t,\tilde x)},
\]
and $k(x-y)-k(\tilde x-y)\ge0$ for $x,y\in\mathbb{R}^n_-$ since $k(x)$ is decreasing in $|x|$. Moreover, $w(t,x)=0$ for $x\in\ell$, and for $x\in\mathbb{R}^n_-$,
\[
w(0,x)=\gamma(0,x)-\gamma(0,\tilde x)\ge0,
\]
since $\Omega(0)\subseteq\mathbb{R}^n_-$. Thus, by the comparison principle, one has $w(t,x)\ge0$ for $t>0$, $x\in\mathbb{R}^n_-$, which contradicts (4.9). The proof is complete.

Notice that in Theorem 1.6, extra conditions on the kernel function and the initial domain are needed to guarantee the continuous expansion of $\Omega(t)$. Now we construct two examples to show that when one of these two extra conditions in Theorem 1.6 is violated, the jumping phenomenon can happen.

Example 1. This example concerns the assumption (K1) on kernel functions. For simplicity, we focus on the one-dimensional case and assume that the initial domain is an interval. According to Theorem 1.6, if the kernel function $k(x)$ is decreasing in $|x|$, then $\Omega(t)$ expands continuously. On the contrary, in this example we choose a kernel function which is not decreasing in $|x|$, and the jumping phenomenon happens.

Define
\[
k^*(x)=\begin{cases}\dfrac{1}{4\sigma} & 1-\sigma\le|x|\le1+\sigma,\\[4pt] 0 & \text{otherwise},\end{cases}
\]
where $0<\sigma<\frac14$ is small. Consider the problem
\[
\begin{cases}
\gamma_t(t,x)=\displaystyle\int_{\mathbb{R}}k_j(x-y)\gamma^+(t,y)\,dy-\gamma^+(t,x) & t>0,\ x\in\mathbb{R},\\
\gamma(0,x)=c_0 & x\in\big(-\frac14,\frac14\big),\\
\gamma(0,x)=-\ell_0 & x\in\mathbb{R}\setminus\big(-\frac14,\frac14\big),
\end{cases}
\tag{4.10}
\]
where $c_0,\ell_0$ are positive constants, $k_j$ satisfies the assumption (K) and $\lim_{j\to\infty}\|k_j-k^*\|_{L^1(\mathbb{R}^n)}=0$. Let $\gamma_j$ denote the solution to the problem (4.10). We claim that if $2\ell_0<c_0$ and $0<\sigma<\frac14$, then the jumping phenomenon happens for (4.10) when $j$ is sufficiently large.

To prove the claim, first consider the problem
\[
\begin{cases}
\gamma_t(t,x)=\displaystyle\int_{\mathbb{R}}k^*(x-y)\gamma^+(t,y)\,dy-\gamma^+(t,x) & t>0,\ x\in\mathbb{R},\\
\gamma(0,x)=c_0 & x\in\big(-\frac14,\frac14\big),\\
\gamma(0,x)=-\ell_0 & x\in\mathbb{R}\setminus\big(-\frac14,\frac14\big).
\end{cases}
\tag{4.11}
\]
The existence and uniqueness of the solution to this problem, denoted by $\gamma_*$, can be verified by arguments similar to those in the proof of Theorem 1.1. Moreover, similar to the proof of (2.3) in the proof of Theorem 1.1, one has $\lim_{j\to\infty}\|\gamma_j-\gamma_*\|_{L^\infty(\mathbb{R}^n)}=0$. Hence it suffices to show that the jumping phenomenon happens in the limiting problem (4.11) if $2\ell_0<c_0$ and $0<\sigma<\frac14$.

Let $t_1$ denote the moment when $\gamma_*$ first touches zero somewhere in $\mathbb{R}\setminus\big(-\frac14,\frac14\big)$. For $x\in\big(-\frac14,\frac14\big)$ and $0<t<t_1$, it is easy to see that $\int_{\mathbb{R}^n}k^*(x-y)\gamma_*^+(t,y)\,dy=0$ due to the definition of $k^*$ and the choice of $\sigma$. Thus
\[
\begin{cases}
(\gamma_*^+)_t(t,x)=-\gamma_*^+(t,x) & 0<t<t_1,\ x\in\big(-\frac14,\frac14\big),\\
\gamma_*^+(0,x)=c_0 & x\in\big(-\frac14,\frac14\big),
\end{cases}
\]
and hence $\gamma_*^+(t,x)=c_0e^{-t}$ for $0<t<t_1$, $x\in\big(-\frac14,\frac14\big)$. Then for any $x_*\in\{x\in\mathbb{R}\setminus\big(-\frac14,\frac14\big)\mid\gamma_*(t_1,x)=0\}$, we compute
\[
\ell_0=\int_0^{t_1}\int_{-\frac14}^{\frac14}k^*(x_*-y)\,c_0e^{-t}\,dy\,dt=c_0\big(1-e^{-t_1}\big)\int_{-\frac14}^{\frac14}k^*(x_*-y)\,dy .
\tag{4.12}
\]
According to the definition of $k^*$, it is routine to verify that
\[
\int_{-\frac14}^{\frac14}k^*(x_*-y)\,dy\le\frac12,\qquad\text{with equality if and only if}\qquad x_*\in\Big(-\frac54+\sigma,-\frac34-\sigma\Big)\cup\Big(\frac34+\sigma,\frac54-\sigma\Big).
\]
Hence, when $2\ell_0<c_0$, one has
\[
\Big\{x\in\mathbb{R}\setminus\big(-\tfrac14,\tfrac14\big)\ \Big|\ \gamma(t_1,x)=0\Big\}=\Big(-\frac54+\sigma,-\frac34-\sigma\Big)\cup\Big(\frac34+\sigma,\frac54-\sigma\Big),
\qquad\text{where}\qquad t_1=-\ln\Big(1-\frac{2\ell_0}{c_0}\Big).
\]
Therefore, the jumping phenomenon happens in the problem (4.11) provided that $0<\sigma<\frac14$ and $2\ell_0<c_0$.

Example 2. This example concerns the conditions on the shape of the initial domain. To emphasize the effect of the initial domain, we still require that the kernel functions constructed in this example satisfy the requirements for kernel functions in Theorem 1.6. Then, according to Theorem 1.6, if the initial domain is convex, $\Omega(t)$ expands continuously. However, in the following example the initial domain is non-convex and the jumping phenomenon happens.

Define
\[
\tilde k(x)=\begin{cases}2^{-n}\omega_n^{-1} & |x|\le2,\\ 0 & \text{otherwise},\end{cases}
\]
where $\omega_n$ denotes the volume of the unit ball in $\mathbb{R}^n$. Consider the problem
\[
\begin{cases}
\gamma_t(t,x)=\displaystyle\int_{\mathbb{R}^n}\tilde k_j(x-y)\gamma^+(t,y)\,dy-\gamma^+(t,x) & t>0,\ x\in\mathbb{R}^n,\\
\gamma(0,x)=c_0 & x\in\bar\Omega_0,\\
\gamma(0,x)=-\ell_0 & x\in\mathbb{R}^n\setminus\bar\Omega_0,
\end{cases}
\tag{4.13}
\]
where $c_0,\ell_0$ are positive constants, $n\ge2$, $\bar\Omega_0=\bar B_2(0)\setminus B_1(0)$, the kernel function $\tilde k_j$ satisfies the conditions for kernel functions in Theorem 1.6 and $\lim_{j\to\infty}\|\tilde k_j-\tilde k\|_{L^1(\mathbb{R}^n)}=0$. The existence of such kernel functions is obvious. We claim that if $(1-2^{-n})c_0>\ell_0$, then for $j$ sufficiently large the jumping phenomenon happens for (4.13).

Similar to Example 1, to prove this claim it suffices to show that the jumping phenomenon happens for the limiting model
\[
\begin{cases}
\gamma_t(t,x)=\displaystyle\int_{\mathbb{R}^n}\tilde k(x-y)\gamma^+(t,y)\,dy-\gamma^+(t,x) & t>0,\ x\in\mathbb{R}^n,\\
\gamma(0,x)=c_0 & x\in\bar\Omega_0,\\
\gamma(0,x)=-\ell_0 & x\in\mathbb{R}^n\setminus\bar\Omega_0,
\end{cases}
\tag{4.14}
\]
provided that $(1-2^{-n})c_0>\ell_0$.

Now let $\tilde\gamma$ denote the solution to the problem (4.14) and let $t_2$, if it exists, denote the moment when $\tilde\gamma$ first touches zero somewhere in $\mathbb{R}^n\setminus\bar\Omega_0$. When $0<t<t_2$, thanks to the definition of $\tilde k$, it is easy to check that for $x\neq0$,
\[
\int_{\mathbb{R}^n}\tilde k(x-y)\tilde\gamma^+(t,y)\,dy=\int_{\bar B_2(0)\setminus B_1(0)}\tilde k(x-y)\tilde\gamma(t,y)\,dy<\int_{\bar B_2(0)\setminus B_1(0)}\tilde k(-y)\tilde\gamma(t,y)\,dy .
\]
This indicates that if $t_2<+\infty$, then at $t=t_2$, $\tilde\gamma$ touches zero only at $x=0$, i.e., the jumping phenomenon happens.

It remains to show the existence of $t_2<+\infty$. Suppose that $t_2=+\infty$. Based on the definition of $\tilde k$ and the first equation in (4.14), it is easy to see that
\[
\ell_0\ge\int_0^\infty\int_{\bar B_2(0)\setminus B_1(0)}\tilde k(-y)\tilde\gamma^+(t,y)\,dy\,dt>\int_0^\infty\big(1-2^{-n}\big)c_0e^{-t}\,dt=\big(1-2^{-n}\big)c_0>\ell_0 .
\]
This is impossible. The proof is complete.
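To visualize the jump in Example 1, the following sketch time-steps a discretized version of the limiting problem (4.11) with the annular kernel $k^*$ and reports where the solution first reaches zero outside the initial interval. The grid and time step are assumptions of this sketch; the predicted values quoted in the comments are the ones computed in Example 1 above.

```python
import numpy as np

# Discretization of the limiting problem (4.11) with the annular kernel k^*.
# With c0 = 2, ell0 = 0.5, sigma = 0.1, Example 1 predicts a first touch at
# t1 = -ln(1 - 2*ell0/c0) = ln 2 in the detached set (0.85, 1.15) (and its mirror image).
c0, ell0, sigma = 2.0, 0.5, 0.1
L, n, dt = 4.0, 801, 1.0e-3
x = np.linspace(-L, L, n); dx = x[1] - x[0]
r = np.abs(x[:, None] - x[None, :])
K = dx * np.where((r >= 1.0 - sigma) & (r <= 1.0 + sigma), 1.0 / (4.0 * sigma), 0.0)

gamma = np.where(np.abs(x) < 0.25, c0, -ell0)
outside = np.abs(x) >= 0.25

t, touched = 0.0, None
while t < 5.0 and touched is None:
    gp = np.maximum(gamma, 0.0)
    gamma = gamma + dt * (K @ gp - gp)               # explicit Euler step
    t += dt
    if np.any(outside & (gamma >= 0.0)):
        touched = np.abs(x[outside & (gamma >= 0.0)])

print("first touch at t ~", round(t, 3), "   predicted t1 =", round(-np.log(1.0 - 2.0 * ell0 / c0), 3))
print("touched |x| range ~ [", round(touched.min(), 3), ",", round(touched.max(), 3), "]",
      "   compare with the predicted jump set (0.85, 1.15), detached from (-0.25, 0.25)")
```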
A Important equivalent characterization

In this appendix, we include the proof of Proposition 1.2.

Proof of Proposition 1.2. Assume that (i) holds. For clarity, set $w=(w_1,\dots,w_n)=\frac{\xi}{|\xi|}$. Then we compute as follows:
\[
\begin{aligned}
\frac{1-\hat k(\xi)}{|\xi|^2}
&=\frac{1}{|\xi|^2}\Big(1-\int_{\mathbb{R}^n}e^{-ix\cdot\xi}k(x)\,dx\Big)
=\frac{1}{|\xi|^2}\int_{\mathbb{R}^n}ix\cdot w\int_0^{|\xi|}e^{-(ix\cdot w)\eta}\,d\eta\,k(x)\,dx\\
&=\frac{1}{|\xi|^2}\int_{\mathbb{R}^n}ix\cdot w\int_0^{|\xi|}\big(e^{-(ix\cdot w)\eta}-1\big)\,d\eta\,k(x)\,dx
=\frac{1}{|\xi|^2}\int_{\mathbb{R}^n}(x\cdot w)^2\int_0^{|\xi|}\!\!\int_0^\eta e^{-(ix\cdot w)\tau}\,d\tau\,d\eta\,k(x)\,dx,
\end{aligned}
\]
where the third equality is due to the first equality in (i). Notice that, thanks to the assumptions in (i), we have
\[
\int_{\mathbb{R}^n}(x\cdot w)^2k(x)\,dx=\frac1n\int_{\mathbb{R}^n}|x|^2k(x)\,dx .
\]
Then it follows that
\[
\begin{aligned}
\frac{1-\hat k(\xi)}{|\xi|^2}-\frac{1}{2n}\int_{\mathbb{R}^n}|x|^2k(x)\,dx
&=\frac{1}{|\xi|^2}\int_{\mathbb{R}^n}(x\cdot w)^2\int_0^{|\xi|}\!\!\int_0^\eta e^{-(ix\cdot w)\tau}\,d\tau\,d\eta\,k(x)\,dx-\frac12\int_{\mathbb{R}^n}(x\cdot w)^2k(x)\,dx\\
&=\frac{1}{|\xi|^2}\int_{\mathbb{R}^n}(x\cdot w)^2\int_0^{|\xi|}\!\!\int_0^\eta\big(e^{-(ix\cdot w)\tau}-1\big)\,d\tau\,d\eta\,k(x)\,dx .
\end{aligned}
\]
The Lebesgue dominated convergence theorem yields that
\[
\lim_{\xi\to0}\Big(\frac{1-\hat k(\xi)}{|\xi|^2}-\frac{1}{2n}\int_{\mathbb{R}^n}|x|^2k(x)\,dx\Big)=0 .
\]
Thus (ii) is verified and $\frac{1}{2n}\int_{\mathbb{R}^n}|x|^2k(x)\,dx=A$.

Assume that (ii) holds. First choose $\xi=(0,\dots,\xi_j,\dots,0)$, $1\le j\le n$, with $\xi_j>0$; then
\[
\frac{1-\hat k(\xi)}{|\xi|^2}=\frac{1}{|\xi|^2}\Big(1-\int_{\mathbb{R}^n}e^{-ix\cdot\xi}k(x)\,dx\Big)
=\frac{1}{\xi_j^2}\int_{\mathbb{R}^n}\big(1-e^{-ix_j\xi_j}\big)k(x)\,dx
=\frac{1}{\xi_j^2}\int_{\mathbb{R}^n}\big(1-\cos(x_j\xi_j)+i\sin(x_j\xi_j)\big)k(x)\,dx .
\tag{A.1}
\]
For any $R>0$, we have
\[
\frac{|1-\hat k(\xi)|}{|\xi|^2}\ge\frac{1}{\xi_j^2}\int_{B_R(0)}\big(1-\cos(x_j\xi_j)\big)k(x)\,dx,
\]
which yields that
\[
\lim_{\xi_j\to0}\frac{|1-\hat k(\xi)|}{|\xi|^2}\ge\frac12\int_{B_R(0)}x_j^2k(x)\,dx .
\]
Since $R$ is arbitrary and $1\le j\le n$, one sees that
\[
\frac{1}{2n}\int_{\mathbb{R}^n}|x|^2k(x)\,dx\le A<+\infty .
\tag{A.2}
\]
This also indicates that
\[
\int_{\mathbb{R}^n}|x|k(x)\,dx<+\infty .
\tag{A.3}
\]
Next, still choose $\xi=(0,\dots,\xi_j,\dots,0)$, $1\le j\le n$, with $\xi_j>0$. Notice that
\[
\frac{1-\hat k(\xi)}{|\xi|}=\frac{1}{\xi_j}\int_{\mathbb{R}^n}\big(1-e^{-ix_j\xi_j}\big)k(x)\,dx=\frac{1}{\xi_j}\int_{\mathbb{R}^n}ix_j\int_0^{\xi_j}e^{-ix_j\eta}\,d\eta\,k(x)\,dx .
\]
Due to (A.3), the Lebesgue dominated convergence theorem can be applied, and one sees that
\[
0=\lim_{\xi\to0}\frac{1-\hat k(\xi)}{|\xi|}=\int_{\mathbb{R}^n}ix_jk(x)\,dx,\qquad\text{i.e.}\qquad\int_{\mathbb{R}^n}x_jk(x)\,dx=0,\quad1\le j\le n .
\tag{A.4}
\]
Now, thanks to (A.4), we have
\[
\frac{1}{\xi_j^2}\int_{\mathbb{R}^n}\sin(x_j\xi_j)k(x)\,dx
=\frac{1}{\xi_j^2}\int_{\mathbb{R}^n}x_j\int_0^{\xi_j}\cos(x_j\eta)\,d\eta\,k(x)\,dx
=\frac{1}{\xi_j^2}\int_{\mathbb{R}^n}x_j\int_0^{\xi_j}\big(\cos(x_j\eta)-1\big)\,d\eta\,k(x)\,dx
=\frac{1}{\xi_j^2}\int_{\mathbb{R}^n}\big(-x_j^2\big)\int_0^{\xi_j}\!\!\int_0^\eta\sin(x_j\tau)\,d\tau\,d\eta\,k(x)\,dx .
\]
Thus (A.2) and the Lebesgue dominated convergence theorem imply that
\[
\lim_{\xi_j\to0}\frac{1}{\xi_j^2}\int_{\mathbb{R}^n}\sin(x_j\xi_j)k(x)\,dx=0 .
\]
Now, letting $\xi_j\to0$ in (A.1), it again follows from (A.2) and the Lebesgue dominated convergence theorem that
\[
A=\lim_{\xi_j\to0}\frac{1}{\xi_j^2}\int_{\mathbb{R}^n}\big(1-\cos(x_j\xi_j)\big)k(x)\,dx
=\lim_{\xi_j\to0}\frac{1}{\xi_j^2}\int_{\mathbb{R}^n}x_j\int_0^{\xi_j}\sin(x_j\eta)\,d\eta\,k(x)\,dx
=\lim_{\xi_j\to0}\frac{1}{\xi_j^2}\int_{\mathbb{R}^n}x_j^2\int_0^{\xi_j}\!\!\int_0^\eta\cos(x_j\tau)\,d\tau\,d\eta\,k(x)\,dx
=\frac12\int_{\mathbb{R}^n}x_j^2k(x)\,dx .
\]
Hence
\[
\int_{\mathbb{R}^n}x_j^2k(x)\,dx=2A=\frac1n\int_{\mathbb{R}^n}|x|^2k(x)\,dx,\qquad1\le j\le n .
\tag{A.5}
\]
Finally, it remains to show that $\int_{\mathbb{R}^n}x_jx_hk(x)\,dx=0$ for $1\le j,h\le n$, $j\neq h$. Choose $\xi=(0,\dots,\xi_j,\dots,\xi_h,\dots,0)$ with $j<h$, $\xi_j>0$ and $\xi_h=\lambda\xi_j$. Then, thanks to (A.4) and (A.5), it follows that
\[
\begin{aligned}
\frac{1-\hat k(\xi)}{|\xi|^2}
&=\frac{1}{|\xi|^2}\Big(1-\int_{\mathbb{R}^n}e^{-ix\cdot\xi}k(x)\,dx\Big)
=\frac{1}{\xi_j^2+\lambda^2\xi_j^2}\int_{\mathbb{R}^n}\big(1-e^{-i(x_j+\lambda x_h)\xi_j}\big)k(x)\,dx\\
&=\frac{1}{\xi_j^2+\lambda^2\xi_j^2}\int_{\mathbb{R}^n}i(x_j+\lambda x_h)\int_0^{\xi_j}e^{-i(x_j+\lambda x_h)\eta}\,d\eta\,k(x)\,dx
=\frac{1}{\xi_j^2+\lambda^2\xi_j^2}\int_{\mathbb{R}^n}i(x_j+\lambda x_h)\int_0^{\xi_j}\big(e^{-i(x_j+\lambda x_h)\eta}-1\big)\,d\eta\,k(x)\,dx\\
&=\frac{1}{\xi_j^2+\lambda^2\xi_j^2}\int_{\mathbb{R}^n}(x_j+\lambda x_h)^2\int_0^{\xi_j}\!\!\int_0^\eta e^{-i(x_j+\lambda x_h)\tau}\,d\tau\,d\eta\,k(x)\,dx .
\end{aligned}
\]
Letting $\xi_j\to0$, the Lebesgue dominated convergence theorem and (A.5) imply that
\[
A=\frac{1}{2(1+\lambda^2)}\int_{\mathbb{R}^n}(x_j+\lambda x_h)^2k(x)\,dx=A+\frac{\lambda}{1+\lambda^2}\int_{\mathbb{R}^n}x_jx_hk(x)\,dx .
\]
This indicates that $\int_{\mathbb{R}^n}x_jx_hk(x)\,dx=0$, since $\lambda$ is an arbitrary constant. The proof is complete.
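Proposition 1.2 can also be sanity-checked numerically: for a kernel satisfying (i), the quotient $(1-\hat k(\xi))/|\xi|^2$ should approach $\frac{1}{2n}\int|x|^2k(x)\,dx$ as $\xi\to0$. The sketch below does this in one dimension for the standard Gaussian kernel, for which the limit is $1/2$; the kernel and the frequency values are assumptions of this illustration.

```python
import numpy as np

# For the 1-D standard Gaussian kernel k, k_hat(xi) = exp(-xi^2/2), and Proposition 1.2 gives
# A = (1/(2n)) * int x^2 k(x) dx = 1/2.  Check that (1 - k_hat(xi)) / xi^2 -> 1/2 as xi -> 0.
x = np.linspace(-12.0, 12.0, 60001); dx = x[1] - x[0]
k = np.exp(-0.5 * x**2) / np.sqrt(2.0 * np.pi)
A_from_moments = 0.5 * np.sum(x**2 * k) * dx          # (1/(2n)) * int |x|^2 k dx with n = 1

for xi in (1.0, 0.5, 0.1, 0.05):
    k_hat = np.sum(np.cos(xi * x) * k) * dx           # real part of int e^{-i xi x} k(x) dx
    print(f"xi={xi:5.2f}   (1 - k_hat)/xi^2 = {(1.0 - k_hat) / xi**2:.6f}   A = {A_from_moments:.6f}")
```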
Data Availability Statement

The authors confirm that this manuscript has no associated data.

References

[1] F. Andreu-Vaillo, J. M. Mazón, J. D. Rossi and J. J. Toledo-Melero, Nonlocal Diffusion Problems, Mathematical Surveys and Monographs, 165, American Mathematical Society, Providence, RI; Real Sociedad Matemática Española, Madrid, 2010.
[2] I. Athanasopoulos and L. Caffarelli, Continuity of the temperature in boundary heat control problems, Adv. Math. 224 (2010) 293–315.
[3] I. Athanasopoulos, L. Caffarelli and E. Milakis, The two-phase Stefan problem with anomalous diffusion, Adv. Math. 406 (2022) 1–19.
[4] P. Bates and G. Zhao, Existence, uniqueness and stability of the stationary solution to a nonlocal evolution equation arising in population dispersal, J. Math. Anal. Appl. 332 (2007) 428–440.
[5] C. Brändle, E. Chasseigne and F. Quirós, Phase transitions with midrange interactions: a nonlocal Stefan model, SIAM J. Math. Anal. 44 (2012) 3071–3100.
[6] L. Caffarelli, The regularity of free boundaries in higher dimensions, Acta Math. 139 (1977) 155–184.
[7] L. Caffarelli, A. Petrosyan and H. Shahgholian, Regularity of a free boundary in parabolic potential theory, J. Amer. Math. Soc. 17 (2004) 827–869.
[8] J.-F. Cao, Y. Du, F. Li and W.-T. Li, The dynamics of a Fisher-KPP nonlocal diffusion model with free boundaries, J. Funct. Anal. 277 (2019) 2772–2814.
[9] Y. Du, F. Li and M. Zhou, Semi-wave and spreading speed of the nonlocal Fisher-KPP equation with free boundaries, J. Math. Pures Appl. 154 (2021) 30–66.
[10] A. Friedman, The Stefan problem in several space variables, Trans. Amer. Math. Soc. 132 (1968) 51–87.
[11] A. Friedman, Variational Principles and Free-Boundary Problems, Pure and Applied Mathematics, A Wiley-Interscience Publication, John Wiley & Sons, Inc., New York, 1982.
[12] O. A. Ladyženskaja, V. A. Solonnikov and N. N. Ural'ceva, Linear and Quasilinear Equations of Parabolic Type, Transl. Math. Monogr., vol. 23, Amer. Math. Soc., Providence, RI, 1968.
[13] F. Li, J. Coville and X. Wang, On eigenvalue problems arising from nonlocal diffusion models, Discrete Contin. Dyn. Syst. 37 (2017), no. 2, 879–903.

diff --git a/lNFQT4oBgHgl3EQfnTbj/content/tmp_files/load_file.txt b/lNFQT4oBgHgl3EQfnTbj/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..752084e5c68d4505d7586fb5a5428415dd0c78af
--- /dev/null
+++ b/lNFQT4oBgHgl3EQfnTbj/content/tmp_files/load_file.txt
@@ -0,0 +1,1076 @@
filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf,len=1075
arXiv:2301.13369v1 [math.AP] 31 Jan 2023

Free boundary problem with a nonlocal kernel*

Xinfu Chen, Department of Mathematics, University of Pittsburgh, Pittsburgh, PA 15260, USA.
Fang Li†, School of Mathematics, Sun Yat-sen University, No. 135, Xingang Xi Road, Guangzhou 510275, P. R. China.
Maolin Zhou, Chern Institute of Mathematics and LPMC, Nankai University, Tianjin 300071, P. R. China.

Abstract. In this paper, we propose a new nonlocal model for the two-phase Stefan problem, where the nonlocal version of the one-phase Stefan problem arises naturally as a special case. Among other things, we obtain the optimal condition for the pointwise convergence between the local and nonlocal one-phase Stefan problems and an equivalent characterization of this optimal condition. Moreover, we provide some sufficient criteria for the continuous expansion of free boundaries, and when the sufficient conditions are violated, we construct examples to demonstrate that jumping phenomena could happen on the free boundaries. The jumping phenomena are essentially induced by the nonlocal diffusion and thus do not appear in the classical Stefan problem.

Keywords: nonlocal Stefan problem, free boundary, jumping phenomena
MSC (2020): 35K57, 45K05, 35R35

*M. Zhou was partially supported by the National Key Research and Development Program of China (2021YFA1002400, 2020YFA0713300), the Nankai Zhide Foundation and the NSF of China (No. 12271437, 11971498). F. Li was supported by the NSF of China (No. 11971498).
†Corresponding author. E-mail: lifang55@mail.sysu.edu.cn

1 Introduction

The classical Stefan problem is well known to describe the evolution of the interface between two phases of a substance undergoing a phase change, for example the melting of a solid, such as ice to water. Latent heat, defined as the heat or energy that is absorbed or released during a phase change of a substance, acts as an energy source or sink at a moving solid-liquid interface, and the resulting boundary condition is known as the Stefan boundary condition.

In this paper, we propose and study the nonlocal version of the two-phase Stefan problem
\[
\begin{cases}
\gamma_t(t,x)=a\displaystyle\int_{\{\gamma>0\}}k(x-y)\gamma(t,y)\,dy-a\gamma(t,x)\chi_{\{\gamma>0\}}\\
\qquad\qquad+\,b\displaystyle\int_{\{\gamma<-\ell_0\}}\eta(x-y)(\gamma(t,y)+\ell_0)\,dy-b(\gamma(t,x)+\ell_0)\chi_{\{\gamma<-\ell_0\}} & t>0,\ x\in\mathbb{R}^n,\\
\gamma(0,x)=\gamma_0(x) & x\in\mathbb{R}^n,
\end{cases}
\tag{1.1}
\]
where $\chi_E$ denotes the characteristic function of $E$ and the kernel functions $k,\eta$ satisfy

(K) $k\in C(\mathbb{R}^n)\cap L^\infty(\mathbb{R}^n)$, $k\ge0$, $k(0)>0$, $\int_{\mathbb{R}^n}k(x)\,dx=1$.

For clarity, we always assume that
\[
\Omega_0\ \text{is a smooth and bounded domain in }\mathbb{R}^n,\qquad\ell_0\ \text{is a positive constant}.
\tag{1.2}
\]
For the initial data, we assume that
\[
\gamma_0(x)\in L^\infty(\mathbb{R}^n),\qquad\gamma_0(x)=-\alpha_0\ \text{for }x\in\mathbb{R}^n\setminus\bar\Omega_0,\qquad\alpha_0\in(0,\ell_0).
\tag{1.3}
\]
Also denote
\[
\gamma^+(t,x)=\gamma(t,x)\chi_{\{\gamma>0\}},\qquad(\gamma(t,x)+\ell_0)^-=(\gamma(t,x)+\ell_0)\chi_{\{\gamma<-\ell_0\}},
\]
and
\[
\Omega(t)=\{x\in\mathbb{R}^n\mid\gamma(t,x)\ge0\},\qquad\Omega^-(t)=\{x\in\mathbb{R}^n\mid\gamma(t,x)\le-\ell_0\}.
\tag{1.4}
\]
These notations will be used whenever it is more convenient.

To better elaborate the formulation of the model, we first consider the classical one-phase Stefan problem, which is the description, typically, of the melting of a body of ice, maintained at zero degrees centigrade, in contact with a region of water initially occupying $\Omega_0$. Based on latent heat and conservation of energy, the model is formulated as follows:
\[
\begin{cases}
\theta_t(t,x)=d\Delta\theta(t,x) & t>0,\ x\in\{\theta(t,\cdot)>0\},\\
\nabla_x\theta\cdot\nabla_xs=-\ell_0 & t>0,\ x\in\partial\{\theta(t,\cdot)>0\},\\
\theta=0 & t>0,\ x\in\partial\{\theta(t,\cdot)>0\},\\
\theta(0,x)=u_0(x) & x\in\bar\Omega_0,
\end{cases}
\tag{1.5}
\]
where $\theta=\theta(t,x)$ denotes the water's temperature, and the free boundary $\partial\{\theta(t,\cdot)>0\}$ at time $t$ is given by the equation $s(x)=t$; also set $s(x)=0$ if $x\in\bar\Omega_0$. There are many famous papers on the regularity of the free boundary, such as [6, 7, 10].

On the basis of latent heat, the nonlocal version of the one-phase Stefan problem is proposed as follows:
\[
\begin{cases}
\gamma_t(t,x)=d\displaystyle\int_{\{\gamma>0\}}k(x-y)\gamma(t,y)\,dy-d\gamma(t,x)\chi_{\{\gamma>0\}} & t>0,\ x\in\mathbb{R}^n,\\
\gamma(0,x)=\gamma_0(x) & x\in\mathbb{R}^n,
\end{cases}
\tag{1.6}
\]
where the kernel function $k$ satisfies (K), and for the initial data we assume that
\[
\gamma_0(x)\in L^\infty(\mathbb{R}^n),\qquad\gamma_0(x)=-\ell_0\ \text{for }x\in\mathbb{R}^n\setminus\bar\Omega_0,\qquad\gamma_0|_{\bar\Omega_0}\ge0,\quad\gamma_0|_{\bar\Omega_0}\not\equiv0 .
\tag{1.7}
\]
The essence of the nonlocal Stefan problem (1.6) is that at time $t$, if $x\in\{x\in\mathbb{R}^n\mid\gamma(t,x)\le0\}$, then it can only absorb energy from outside, while if $x\in\{x\in\mathbb{R}^n\mid\gamma(t,x)>0\}$, then it can both absorb energy from outside and transfer its energy outside. Here the value $\ell_0$ plays the role of latent heat, $\gamma$ equal to $-\ell_0$ corresponds to the status of ice at zero degrees centigrade, and $\gamma$ reaching zero represents that sufficient energy has already accumulated there for the phase change.

The nonlocal version of the two-phase Stefan problem (1.1) is proposed in the same spirit. The phase change happens when $\gamma$ reaches either zero or $-\ell_0$, and the initial data $\gamma=-\alpha_0$ in $\mathbb{R}^n\setminus\bar\Omega_0$, where $\alpha_0\in(0,\ell_0)$, corresponds to a mixture of water and ice at zero degrees centigrade. Different from the one-phase case, in the initial data $\gamma_0|_{\bar\Omega_0}$ could change sign, and in particular, when both $\{x\in\mathbb{R}^n\mid\gamma(t,x)<-\ell_0\}$ and $\{x\in\mathbb{R}^n\mid\gamma(t,x)>0\}$ are nonempty, in the set $\{x\in\mathbb{R}^n\mid-\ell_0\le\gamma(t,x)\le0\}$ energy could be absorbed and released simultaneously.

We point out that the nonlocal version of the one-phase Stefan problem was also proposed and studied in [5]; some discussions will be given where the results obtained in this paper are related to those derived in [5]. Moreover, the fractional two-phase Stefan problem was treated in [2], and more generally, the two-phase Stefan problem with anomalous diffusion was investigated in [3].

The main purpose of this paper is to study the effects of nonlocal diffusion operators on the evolution of free boundaries and to explore connections and discrepancies between the local and nonlocal Stefan problems. First of all, we establish results about local existence and global existence for the nonlocal Stefan problems.

Theorem 1.1. Assume that in the problem (1.1) the kernel functions satisfy the assumption (K), the condition (1.2) is valid and the initial data satisfies (1.3). Then the problem (1.1) admits a unique classical solution $\gamma(t,\cdot)\in L^\infty(\mathbb{R}^n)$ defined for all $t>0$, and $\gamma$ satisfies the estimate
\[
\mathop{\rm ess\,inf}_{\mathbb{R}^n}\gamma_0\le\gamma(t,x)\le\mathop{\rm ess\,sup}_{\mathbb{R}^n}\gamma_0\qquad\text{for }t>0,\ x\in\mathbb{R}^n .
\tag{1.8}
\]
Moreover, if $\gamma_0|_{\bar\Omega_0}\in C(\bar\Omega_0)$, then $\gamma(t,\cdot)$ is continuous in $\bar\Omega_0$ and in $\mathbb{R}^n\setminus\bar\Omega_0$ for any $t>0$.

Next, we investigate the convergence relations between local and nonlocal Stefan problems. For simplicity, for $\epsilon>0$, denote $k_\epsilon(x)=\frac{1}{\epsilon^n}k(\frac{x}{\epsilon})$, $\eta_\epsilon(x)=\frac{1}{\epsilon^n}\eta(\frac{x}{\epsilon})$.

Before we present the main results, we briefly explain what should be the natural and optimal assumptions on the nonlocal kernel functions in the study of convergence relations between models with local and nonlocal diffusion. Define the Fourier transform of the kernel function $k$ as
\[
\hat k(\xi)=\int_{\mathbb{R}^n}e^{-ix\cdot\xi}k(x)\,dx .
\]
Based on the properties of the Fourier transform, one observes that for $\varphi\in L^1(\mathbb{R}^n)\cap C^2(\mathbb{R}^n)$,
\[
\int_{\mathbb{R}^n}e^{-ix\cdot\xi}\Big(\frac{1}{\epsilon^2}\int_{\mathbb{R}^n}k_\epsilon(x-y)\varphi(y)\,dy-\frac{1}{\epsilon^2}\varphi(x)\Big)dx=\frac{1}{\epsilon^2}\big(\hat k(\epsilon\xi)-1\big)\hat\varphi(\xi),\qquad
\int_{\mathbb{R}^n}e^{-ix\cdot\xi}\Delta\varphi(x)\,dx=-|\xi|^2\hat\varphi(\xi),
\]
and for fixed $\xi$,
\[
\lim_{\epsilon\to0}\frac{1}{\epsilon^2}\big(\hat k(\epsilon\xi)-1\big)\hat\varphi(\xi)=-A|\xi|^2\hat\varphi(\xi)
\]
under the condition
\[
\hat k(\xi)=1-A|\xi|^2+o(|\xi|^2)\quad\text{as }\xi\to0,
\tag{1.9}
\]
where $A>0$ is a constant. This observation indicates that the condition (1.9) is optimal in the study of nonlocal approximations of the Laplacian. Indeed, the nonlocal approximation of the heat equation is verified under this condition; see [1] for details. We establish an important equivalent characterization of the condition (1.9).

Proposition 1.2. Assume that $k$ satisfies the assumption (K). Then the following two statements are equivalent.
(i) For $1\le j,h\le n$, $j\neq h$,
\[
\int_{\mathbb{R}^n}x_jk(x)\,dx=0,\qquad\int_{\mathbb{R}^n}x_jx_hk(x)\,dx=0,\qquad\int_{\mathbb{R}^n}x_j^2k(x)\,dx=\frac1n\int_{\mathbb{R}^n}|x|^2k(x)\,dx<+\infty .
\]
(ii) The Fourier transform of $k$ satisfies the assumption (1.9).
Moreover, $\frac{1}{2n}\int_{\mathbb{R}^n}|x|^2k(x)\,dx=A$.

In order not to interrupt the main theme of this paper, we leave the proof of this proposition to the appendix.

We first establish the convergence result for the two-phase Stefan problem. Let $\gamma_\epsilon$ be the solution of the following problem:
\[
\begin{cases}
(\gamma_\epsilon)_t(t,x)=\dfrac{a}{\epsilon^2}\displaystyle\int_{\{\gamma_\epsilon>0\}}k_\epsilon(x-y)\gamma_\epsilon(t,y)\,dy-\dfrac{a}{\epsilon^2}\gamma_\epsilon(t,x)\chi_{\{\gamma_\epsilon>0\}}\\
\qquad\qquad\qquad+\dfrac{b}{\epsilon^2}\displaystyle\int_{\{\gamma_\epsilon<-\ell_0\}}\eta_\epsilon(x-y)(\gamma_\epsilon(t,y)+\ell_0)\,dy-\dfrac{b}{\epsilon^2}(\gamma_\epsilon(t,x)+\ell_0)\chi_{\{\gamma_\epsilon<-\ell_0\}} & t>0,\ x\in\mathbb{R}^n,\\
\gamma_\epsilon(0,x)=\gamma_0(x) & x\in\mathbb{R}^n .
\end{cases}
\tag{1.10}
\]
Theorem 1.3. In the problem (1.10), assume that the conditions of Theorem 1.1 are valid. In addition, assume that the kernel functions satisfy Proposition 1.2(i) and
\[
\int_{\mathbb{R}^n}|x|^3k(x)\,dx<+\infty .
\tag{1.11}
\]
Then for any given $T>0$ and $0<t<T$, $\gamma_\epsilon(t,\cdot)$ converges to $\gamma(t,\cdot)$ in $L^1_{\rm loc}(\mathbb{R}^n)$ as $\epsilon\to0^+$, where $\gamma\in L^\infty((0,T)\times\mathbb{R}^n)$ is the generalized solution of
\[
\begin{cases}
\Delta u\in\beta(u)_t,\\
\beta(u)(0,x)=\gamma_0(x),
\end{cases}
\tag{1.12}
\]
where $A=\frac a2\int_{\mathbb{R}^n}|z|^2k(z)\,dz$, $B=\frac b2\int_{\mathbb{R}^n}|z|^2\eta(z)\,dz$,
\[
u=\begin{cases}A\gamma & \gamma>0,\\ 0 & -\ell_0\le\gamma\le0,\\ B(\gamma+\ell_0) & \gamma<-\ell_0,\end{cases}
\qquad\text{and}\qquad
\beta(u)=\begin{cases}\frac1Bu-\ell_0 & u<0,\\ [-\ell_0,0] & u=0,\\ \frac1Au & u>0,\end{cases}
\]
with $\beta(u)$ a multivalued mapping.

Thanks to Proposition 1.2, one sees that in Theorem 1.3 only the condition (1.11) is extra in the study of convergence relations. Obviously, kernel functions which are radially symmetric and compactly supported satisfy the extra condition (1.11).

Next, the convergence relations between local and nonlocal one-phase Stefan problems are verified under the optimal condition (1.9). Similar to (1.10), we rescale the problem (1.6) as follows:
\[
\begin{cases}
\gamma_{\epsilon t}(t,x)=\dfrac{1}{\epsilon^2}\displaystyle\int_{\mathbb{R}^n}k_\epsilon(x-y)\gamma_\epsilon^+(t,y)\,dy-\dfrac{1}{\epsilon^2}\gamma_\epsilon^+(t,x) & t>0,\ x\in\mathbb{R}^n,\\
\gamma_\epsilon(0,x)=\gamma_0(x) & x\in\mathbb{R}^n,
\end{cases}
\tag{1.13}
\]
where for simplicity we set $d=1$ and denote $\gamma_\epsilon^+(t,x)=\gamma_\epsilon(t,x)\chi_{\{\gamma_\epsilon(t,x)>0\}}$.

Theorem 1.4. In the problem (1.13), assume that the kernel function satisfies the assumption (K), the condition (1.2) is valid and the initial data satisfies (1.7). Also assume that the Fourier transform of $k$ satisfies (1.9). Then for any given $T>0$, $\gamma_\epsilon^+$ converges to the solution $\theta$ of the one-phase Stefan problem (1.5) in the following sense:
\[
\int_0^t\gamma_\epsilon^+(\tau,x)\,d\tau\to\int_{\min\{s(x),t\}}^t\theta(\tau,x)\,d\tau\quad\text{a.e. in }(0,T)\times\mathbb{R}^n,
\]
where we set $d=A$ in the problem (1.5).

The convergence relation between local and nonlocal one-phase Stefan problems is also studied in [5] under the additional conditions that the kernel function is radially symmetric and compactly supported.

From now on, we mainly focus on the nonlocal one-phase Stefan problem and derive some interesting and fundamental properties related to the expansion, boundedness and continuity of free boundaries in the nonlocal one-phase Stefan problem (1.6). Due to the lack of regularity in nonlocal Stefan problems, we will impose the extra condition $\gamma_0|_{\bar\Omega_0}\in C(\bar\Omega_0)$ on the initial data $\gamma_0$ when discussing the properties of free boundaries.

Theorem 1.5. In the problem (1.6), assume that the kernel function satisfies the assumption (K), the condition (1.2) is valid, and the initial data $\gamma_0$ satisfies (1.7) and the extra condition $\gamma_0|_{\bar\Omega_0}\in C(\bar\Omega_0)$. We have the following statements.
(i) Expansion: there exists $t_0>0$ such that $\Omega(t)=\Omega(0)$ for $0\le t\le t_0$, and $\Omega(t_1)\subseteq\Omega(t_2)$ for $0<t_1<t_2$.
(ii) Boundedness: there exists $R>0$, which depends on the initial data only, such that $\Omega(t)\subseteq B_R(0)$ for all $t>0$.

Theorem 1.5(i) is also proved in [5], where the kernel function is assumed to be compactly supported and radially symmetric. For the nonlocal two-phase Stefan problem (1.1), due to the interaction between $\Omega(t)$ and $\Omega^-(t)$ defined in (1.4), Theorem 1.5(i) might not hold. However, thanks to the comparison principle, Theorem 1.5(ii) remains true for both $\Omega(t)$ and $\Omega^-(t)$.

We further investigate the continuity of the free boundary in the nonlocal one-phase Stefan problem. For convenience, we prepare an extra assumption on the kernel function:

(K1) $k(x)$ is radially symmetric and decreasing in $|x|$.

Theorem 1.6.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' Under the conditions of Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='5, if additionally assume that ¯Ω0 is convex and the assumption (K1) is valid, then Ω(t) expands continuously.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' In Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='6, extra conditions on the kernel function k(x) and the initial domain Ω0 are needed to guarantee the continuous expansion of the free boundary ∂Ω(t).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' A natural question is what happens without these extra conditions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' Two examples are constructed to show that when either the extra condition on the kernel function or that on the initial domain Ω0 in Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='6 is violated, the population range could generate at a distant place.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' This is so called jumping 7 phenomena.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' Since the nonlocal dispersal describes the movement between non-adjacent spatial locations, the jumping phenomena is natural.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' It also reflects the essential differences between local and nonlocal dispersal operators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' We also point out that, if allowing the initial data to be nonconstant outside ¯Ω0, similar to [5, Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='6], where the kernel function is assumed to be compactly supported and radially symmetric, jumping phenomena could happen by choosing initial data properly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' Indeed, the conclusion is valid as long as the kernel function satisfies the assumption (K).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' We omit the proof since it is similar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' At the end, the main features of our paper are summarized as follows.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' Formulation of a new nonlocal model for two-phase Stefan problem, where the nonlocal version of the one-phase Stefan problem arises naturally as a special case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' The optimal condition (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='9) for the pointwise convergence between local and nonlocal one-phase Stefan problem in Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' An equivalent characterization between the conditions (i) about the kernel function and (ii), i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=', (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='9), about the Fourier transform of the kernel function in Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' For local and global existence in Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='1, expansion and boundedness of free bound- aries in Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='5, we only require the basic assumption (K) on the kernel functions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' The sufficient conditions derived in Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='6 for the continuous expansion of the free boundary when the initial data outside initial domain Ω0 is assumed to be a negative constant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' Counterexamples are constructed to demonstrate that the jumping phenomena could happen when the sufficient conditions are violated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' This paper is organized as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='1 and some preliminary results for the problem (1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='1) are established in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' In Section 3, we focus on the convergence relations between local and nonlocal Stefan problems and present the proofs of Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='3 and Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' In Section 4, Theorems 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='5 and 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='6 related to properties about the free boundary of the nonlocal Stefan problem are verified.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' Moreover, we construct two examples where jumping phenomena happen, when one of the additional assumptions in Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='6 is violated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' At the end, the proof of Proposition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='2 is included in the appendix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' 2 Wellposedness and preliminaries 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='1 Local and global existence We first verify the local and global existence to the nonlocal version of the two-phase Stefan problem (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' The same arguments can be applied to the the nonlocal version of the one-phase Stefan problem (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='6) word by word.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' 8 Proof of Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/lNFQT4oBgHgl3EQfnTbj/content/2301.13369v1.pdf'} +page_content=' Denote M0 = ∥γ0∥L∞(Rn), Y = L∞(Rn), for s > 0, Xs = � φ ∈ C([0, s), Y) ��φ(0, ·) = γ0(·), ∥φ(t, ·)∥L∞(Rn) ≤ 2M0, t ∈ [0, s) � , and ∥φ∥C([0,s),Y) = sup 0≤t. +Preprint. 
+Therefore, we require methods that can guarantee robust and +safe performance under general forms of environment un- +certainty. Unfortunately, popular approaches to robustness +in deep RL consider very structured forms of uncertainty +in order to facilitate efficient implementations. Adversarial +methods implement a specific type of perturbation, such +as the application of a physical force (Pinto et al., 2017) +or a change in the action that is deployed (Tessler et al., +2019a). Parametric approaches, on the other hand, consider +robustness with respect to environment characteristics that +can be altered in a simulator (Rajeswaran et al., 2017; Peng +et al., 2018; Mankowitz et al., 2020). When we lack domain +knowledge on the structure of potential disturbances, these +techniques may not guarantee robustness and safety. +Another drawback of existing approaches is their need to +directly modify the environment during training. Parametric +methods assume the ability to generate a range of training +environments with a detailed simulator, while adversarial +methods directly influence the data collection process by at- +tempting to negatively impact performance. In applications +where simulators are inaccurate or unavailable, however, +parametric methods cannot be applied and real-world data +collection may be required for training. In this context, it +is also undesirable to implement adversarial perturbations +while interacting in the environment. Therefore, in many +real-world domains, we must consider alternative methods +for learning safe policies with robustness guarantees. +In this work, we propose a safe RL framework that provides +robustness to general forms of environment disturbances +using standard data collection in a nominal training environ- +ment. We consider robustness over a general uncertainty set +defined using the optimal transport cost between environ- +ment transitions, and we leverage optimal transport theory +to demonstrate how our framework can be efficiently im- +plemented by applying Optimal Transport Perturbations +to state transitions in a completely offline fashion. These +perturbations can be added to the training process of any +safe RL algorithm to incorporate robustness to unknown +disturbances, without harming performance during training +or requiring access to a range of simulated training environ- +ments. +We summarize our contributions as follows: +arXiv:2301.13375v1 [cs.LG] 31 Jan 2023 + +Optimal Transport Perturbations for Safe Reinforcement Learning with Robustness Guarantees +1. We formulate a safe RL framework that incorporates +robustness to general disturbances using the optimal +transport cost between environment transitions. +2. We show that the resulting distributionally robust opti- +mization problems over transition distributions can be +reformulated as constrained adversarial perturbations +to state transitions in the training environment. +3. We propose an efficient deep RL implementation of our +Optimal Transport Perturbations, which can be applied +in a completely offline fashion without impacting data +collection during training. +4. We demonstrate that the use of Optimal Transport Per- +turbations leads to robust and safe performance both +during training and in the presence of disturbances +through experiments on continuous control tasks with +safety constraints in the Real-World RL Suite (Dulac- +Arnold et al., 2020; 2021). +2. Related Work +2.1. 
Safe Reinforcement Learning +The most common approach to modeling safety in RL is +to incorporate constraints on expected total costs (Altman, +1999). In recent years, several deep RL algorithms have +been developed for this framework. A popular approach +is to solve the Lagrangian relaxation of the constrained +problem (Tessler et al., 2019b; Ray et al., 2019; Stooke +et al., 2020), which is supported by theoretical results that +constrained RL has zero duality gap (Paternain et al., 2019). +Xu et al. (2021), on the other hand, consider immediate +switching between the reward and cost objectives to better +satisfy safety during training. Alternatively, Achiam et al. +(2017) and Liu et al. (2022) construct closed-form solutions +to guide policy updates in safe RL. +A related line of work focuses on the issue of safe explo- +ration during data collection. In order to promote safety +throughout trajectory rollouts, these methods correct poten- +tially dangerous actions through the use of control barrier +functions (Cheng et al., 2019; Emam et al., 2021; Ma et al., +2021), safety layers (Dalal et al., 2018), learned safety crit- +ics (Srinivasan et al., 2020; Bharadhwaj et al., 2021), and +recovery policies (Thananjeyan et al., 2021; Wagener et al., +2021). In particular, Bharadhwaj et al. (2021) learn a conser- +vative estimate of the safety critic in order to protect against +safety violations during training. Our robust perspective +towards safety can be viewed as an alternative method for +learning a conservative safety critic, which guarantees safety +both during and after training by guarding against unknown +environment disturbances. +2.2. Robust Reinforcement Learning +Robust RL methods account for uncertainty in the environ- +ment by considering worst-case transition distributions from +an uncertainty set (Nilim & Ghaoui, 2005; Iyengar, 2005). +In order to scale the robust RL framework to the deep RL +setting, most techniques have focused on parametric uncer- +tainty or adversarial training. +Domain randomization (Tobin et al., 2017; Peng et al., 2018) +represents a popular approach to parametric uncertainty in +sim-to-real transfer settings, where a policy is trained to +maximize average performance across a range of simulated +training environments. These environments are generated +by modifying important parameters in the simulator, which +are often determined based on domain knowledge. The goal +of maximizing average performance over a range of train- +ing environments has also been referred to as a soft-robust +approach (Derman et al., 2018). Other methods directly +impose a robust perspective towards parametric uncertainty +by focusing on the worst-case training environments gener- +ated over a range of simulator parameters (Rajeswaran et al., +2017; Abdullah et al., 2019; Mankowitz et al., 2020). All +of these approaches assume access to a simulated version +of the real environment, as well as the ability to modify +parameters of this simulator. +Adversarial RL methods represent an alternative approach +to robustness that introduce perturbations directly into the +training process. In order to learn policies that perform +well under worst-case disturbances, these perturbations are +trained to minimize performance. 
Deep RL approaches to +adversarial training have introduced perturbations in the +form of physical forces in the environment (Pinto et al., +2017), as well as adversarial corruptions to actions (Tessler +et al., 2019a; Vinitsky et al., 2020) and state observations +(Mandlekar et al., 2017; Zhang et al., 2020; Kuang et al., +2022). In this work, we learn adversarial perturbations on +state transitions, but different from adversarial RL methods +we apply these perturbations in a completely offline fashion +without impacting the data collection process. +Finally, safety and robustness have recently been considered +together in a unified RL framework. Mankowitz et al. (2021) +and Russel et al. (2021) propose a formulation that incorpo- +rates robustness into both the objective and constraints in +safe RL. We consider this general framework as a starting +point for our work. +3. Preliminaries +3.1. Safe Reinforcement Learning +Consider an infinite-horizon, discounted Constrained +Markov Decision Process (C-MDP) (Altman, 1999) defined +by the tuple (S, A, p, r, c, ρ0, γ), where S is the set of states, + +Optimal Transport Perturbations for Safe Reinforcement Learning with Robustness Guarantees +A is the set of actions, p : S × A → P(S) is the transi- +tion probability function where P(S) denotes the space of +probability measures over S, r : S × A → R is the reward +function, c : S × A → R is the cost function, ρ0 is the +initial state distribution, and γ is the discount rate. +We model the agent’s decisions as a stationary policy +π : S → P(A). +For a given C-MDP and policy π, +the expected total discounted rewards and costs are given +by Jp,r(π) = Eτ∼(π,p) [�∞ +t=0 γtr(st, at)] and Jp,c(π) = +Eτ∼(π,p) [�∞ +t=0 γtc(st, at)], respectively, where τ ∼ (π, p) +represents a trajectory sampled according to s0 ∼ ρ0, +at ∼ π( · | st), and st+1 ∼ p( · | st, at). The goal of +safe RL is to find a policy π that maximizes the constrained +optimization problem +max +π +Jp,r(π) +s.t. +Jp,c(π) ≤ B, +(1) +where B is a safety budget on expected total discounted +costs. +We denote the state-action value functions (i.e., Q functions) +of π for a given C-MDP as Qπ +p,r(s, a) and Qπ +p,c(s, a), and +the state value functions as V π +p,r(s) = Ea∼π(·|s)[Qπ +p,r(s, a)] +and V π +p,c(s) = Ea∼π(·|s)[Qπ +p,c(s, a)]. Policy optimization +techniques (Xu et al., 2021; Liu et al., 2022) iteratively op- +timize (1) by considering the related optimization problem +max +π +E +s∼D +� +E +a∼π(·|s) +� +Qπk +p,r(s, a) +�� +s.t. +E +s∼D +� +E +a∼π(·|s) +� +Qπk +p,c(s, a) +�� +≤ B, +(2) +where πk is the current policy and D represents data col- +lected during training. +3.2. Robust and Safe Reinforcement Learning +We are often interested in finding a policy π that achieves +strong, safe performance across a range of related envi- +ronments. In order to accomplish this, Mankowitz et al. +(2021) and Russel et al. (2021) propose a Robust Con- +strained MDP (RC-MDP) framework defined by the tu- +ple (S, A, P, r, c, ρ0, γ), where P represents an uncertainty +set of transition models. We assume P takes the form +P = � +(s,a)∈S×A Ps,a, where Ps,a is a set of transition +models ps,a = p( · | s, a) ∈ P(S) at a given state-action +pair and P is the product of these sets. This structure is +referred to as rectangularity, and is a common assumption +in the literature (Nilim & Ghaoui, 2005; Iyengar, 2005). +The RC-MDP framework leads to a robust version of (1) +given by +max +π +inf +p∈P Jp,r(π) +s.t. +sup +p∈P +Jp,c(π) ≤ B. 
+(3) +As in the standard safe RL setting, we can iteratively opti- +mize (3) by considering the related optimization problem +max +π +E +s∼D +� +E +a∼π(·|s) +� +Qπk +P,r(s, a) +�� +s.t. +E +s∼D +� +E +a∼π(·|s) +� +Qπk +P,c(s, a) +�� +≤ B, +(4) +where Qπ +P,r(s, a) and Qπ +P,c(s, a) represent robust Q func- +tions. Alternatively, if we only care about robustness with +respect to safety, we can instead consider a nominal or opti- +mistic objective in (4) to promote exploration. +4. Optimal Transport Uncertainty Set +Compared to the standard safe RL update in (2), the only dif- +ference in the robust and safe RL update of (4) comes from +the use of robust Q functions. Therefore, in order to incor- +porate robustness into existing deep safe RL algorithms, we +must be able to efficiently learn Qπ +P,r(s, a) and Qπ +P,c(s, a). +We can write these robust Q functions recursively as +Qπ +P,r(s, a) = r(s, a) + γ +inf +ps,a∈Ps,a +E +s′∼ps,a +� +V π +P,r(s′) +� +, +Qπ +P,c(s, a) = c(s, a) + γ +sup +ps,a∈Ps,a +E +s′∼ps,a +� +V π +P,c(s′) +� +, +where we define the corresponding robust state value +functions as V π +P,r(s′) = Ea′∼π(·|s′) +� +Qπ +P,r(s′, a′) +� +and +V π +P,c(s′) = Ea′∼π(·|s′) +� +Qπ +P,c(s′, a′) +� +. The corresponding +robust Bellman operators (Nilim & Ghaoui, 2005; Iyengar, +2005) can be written as +T π +P,rQr(s, a) := r(s, a) + γ +inf +ps,a∈Ps,a +E +s′∼ps,a +[V π +r (s′)] , +(5) +T π +P,cQc(s, a) := c(s, a) + γ +sup +ps,a∈Ps,a +E +s′∼ps,a +[V π +c (s′)] , +(6) +where we write V π +r (s′) = Ea′∼π(·|s′) [Qr(s′, a′)] and +V π +c (s′) = Ea′∼π(·|s′) [Qc(s′, a′)]. +Note that T π +P,r and T π +P,c are contraction operators, with +Qπ +P,r(s, a) and Qπ +P,c(s, a) their respective unique fixed +points (Nilim & Ghaoui, 2005; Iyengar, 2005). Because +T π +P,r and T π +P,c are contraction operators, we can apply stan- +dard temporal difference (TD) learning techniques to learn +these robust Q functions. In order to do so, we must be + +Optimal Transport Perturbations for Safe Reinforcement Learning with Robustness Guarantees +able to calculate the Bellman targets in (5) and (6), which +involve optimization problems over transition distributions +that depend on the choice of uncertainty set Ps,a at every +state-action pair. Popular choices of Ps,a in the literature +require the ability to change physical parameters of the en- +vironment (Peng et al., 2018) or directly apply adversarial +perturbations during trajectory rollouts (Tessler et al., 2019a) +to calculate worst-case transitions. +In this work, we use optimal transport theory to consider +an uncertainty set that can be efficiently implemented in +a model-free fashion using only samples collected from a +nominal environment. In order to do so, we assume that S is +a Polish space (i.e., a separable, completely metrizable topo- +logical space). Note that the Euclidean space Rn is Polish, +so this is not very restrictive. Next, we define Ps,a using the +optimal transport cost between transition distributions. +Definition 4.1 (Optimal Transport Cost). Let S be a Polish +space, and let d : S × S → R+ be a non-negative, lower +semicontinuous function satisfying d(s′, s′) = 0 for all +s′ ∈ S. Then, the optimal transport cost between two +transition distributions ˆps,a, ps,a ∈ P(S) is defined as +OTCd(ˆps,a, ps,a) = +inf +ν∈Γ(ˆps,a,ps,a) +� +S×S +d(ˆs′, s′)dν(ˆs′, s′), +where Γ(ˆps,a, ps,a) is the set of all couplings of ˆps,a and +ps,a. 
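For finitely supported transition distributions, the infimum over couplings in Definition 4.1 is a small linear program, which makes the definition easy to check numerically. The sketch below is an illustration only and is not something the paper needs to solve explicitly: it computes OTC_d for two empirical next-state distributions with scipy.optimize.linprog, using squared Euclidean distance as the cost d, and then tests membership in an uncertainty set of a given radius ε.

```python
import numpy as np
from scipy.optimize import linprog


def optimal_transport_cost(p_hat, p, support_hat, support, d):
    """Discrete optimal transport cost OTC_d(p_hat, p) as a linear program
    over couplings nu[i, j] >= 0 with the two prescribed marginals."""
    m, k = len(p_hat), len(p)
    # Flattened cost vector: cost[i * k + j] = d(support_hat[i], support[j]).
    cost = np.array([[d(sh, s) for s in support] for sh in support_hat]).ravel()
    A_eq = np.zeros((m + k, m * k))
    for i in range(m):
        A_eq[i, i * k:(i + 1) * k] = 1.0   # sum_j nu[i, j] = p_hat[i]
    for j in range(k):
        A_eq[m + j, j::k] = 1.0            # sum_i nu[i, j] = p[j]
    b_eq = np.concatenate([p_hat, p])
    res = linprog(cost, A_eq=A_eq, b_eq=b_eq, bounds=(0, None))
    return res.fun


if __name__ == "__main__":
    d_sq = lambda a, b: float(np.sum((a - b) ** 2))   # squared Euclidean cost
    support_hat = np.array([[0.0], [1.0]])   # nominal next states
    support_per = np.array([[0.0], [1.5]])   # perturbed next states
    p_hat = np.array([0.5, 0.5])
    p_per = np.array([0.5, 0.5])
    eps = 0.2                                # radius of the uncertainty set
    otc = optimal_transport_cost(p_hat, p_per, support_hat, support_per, d_sq)
    print(f"OTC_d = {otc:.3f}, inside the uncertainty set: {otc <= eps}")
```

In this example d is squared Euclidean distance, so the computed value is the squared 2-Wasserstein distance between the two empirical distributions.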
+If d is chosen to be a metric raised to some power p ≥ 1, +we recover the p-Wasserstein distance raised to the power +p as a special case. If we let d(ˆs′, s′) = 1ˆs′̸=s′, we recover +the total variation distance as a special case (Villani, 2008). +By considering the optimal transport cost from some nomi- +nal transition distribution ˆps,a, we define the optimal trans- +port uncertainty set as follows. +Definition 4.2 (Optimal Transport Uncertainty Set). For a +given nominal transition distribution ˆps,a and radius ϵs,a +at state-action pair (s, a) ∈ S × A, the optimal transport +uncertainty set is defined as +Ps,a = {ps,a ∈ P(S) | OTCd(ˆps,a, ps,a) ≤ ϵs,a} . +This uncertainty set has previously been considered in ro- +bust RL for the special case of the Wasserstein distance +(Abdullah et al., 2019; Hou et al., 2020; Kuang et al., 2022). +The use of optimal transport cost to compare transition dis- +tributions has several benefits. First, optimal transport cost +accounts for the relationship between states in S through the +function d, and we can choose d to reflect the geometry of +S in a meaningful way. In particular, optimal transport cost +allows significant flexibility in the choice of d, including +threshold-based binary comparisons between states that are +not metrics or pseudo-metrics (Pydi & Jog, 2020). Next, +optimal transport cost remains valid for distributions that do +not share the same support, unlike other popular measures +between distributions such as the Kullback-Leibler diver- +gence. In particular, the optimal transport uncertainty set +can be applied to both stochastic and deterministic transi- +tions. Finally, as we will show in the following sections, +the use of an optimal transport uncertainty set results in an +efficient model-free implementation of robust and safe RL +that only requires the ability to collect data in a nominal +environment. +5. Reformulation as Adversarial +Perturbations to State Transitions +In order to provide tractable reformulations of the Bellman +operators in (5) and (6), we consider the following main +assumptions. +Assumption 5.1. +For any π and Qr(s′, a′) in (5), +V π +r (s′) = Ea′∼π(·|s′) [Qr(s′, a′)] is lower semicontinuous +and Es′∼ˆps,a|V π +r (s′)| < ∞. For any π and Qc(s′, a′) in (6), +V π +c (s′) = Ea′∼π(·|s′) [Qc(s′, a′)] is upper semicontinuous +and Es′∼ˆps,a|V π +c (s′)| < ∞. +Assumption 5.2. Optimal transport plans exist for the dis- +tributionally robust optimization problems in (5) and (6). +Note that Assumptions 5.1–5.2 correspond to assumptions +in Blanchet & Murthy (2019) applied to our setting. In +practice, the use of neural network representations results in +continuous value functions, which are bounded for the com- +mon case when rewards and costs are bounded, respectively. +A sufficient condition for Assumption 5.2 to hold is if S is +compact, or if we restrict our attention to a compact subset +of next states in our definition of Ps,a which is reasonable +in practice. Blanchet & Murthy (2019) also provide other +sufficient conditions for Assumption 5.2 to hold. +Under these assumptions, we can reformulate the Bellman +operators in (5) and (6) to allow for efficient deep RL imple- +mentations. +Lemma 5.3. Let Assumption 5.1 hold. Then, we have +T π +P,rQr(s, a) = r(s, a) + γ sup +λ≥0 +� +E +ˆs′∼ˆps,a +� +inf +s′∈S V π +r (s′) + λ (d(ˆs′, s′) − ϵs,a) +�� +, +(7) +T π +P,cQc(s, a) = c(s, a) + γ inf +λ≥0 +� +E +ˆs′∼ˆps,a +� +sup +s′∈S +V π +c (s′) − λ (d(ˆs′, s′) − ϵs,a) +�� +. +(8) +Proof. 
According to Theorem 1 in Blanchet & Murthy +(2019), optimal transport strong duality holds for the distri- +butionally robust optimization problems in (5) and (6) under + +Optimal Transport Perturbations for Safe Reinforcement Learning with Robustness Guarantees +Assumption 5.1. By applying optimal transport strong dual- +ity and substituting these results into (5) and (6), we arrive +at the results in (7) and (8), respectively. See the Appendix +for details. +With the addition of Assumption 5.2, we can further refor- +mulate the results in Lemma 5.3 to arrive at a tractable result +that can be efficiently implemented in a deep RL setting. +Theorem 5.4. Let Assumptions 5.1–5.2 hold, and let G be +the set of all functions from S to S. Then, we have +T π +P,rQr(s, a) = r(s, a) + γ +E +ˆs′∼ˆps,a +� +V π +r (gr +s,a(ˆs′)) +� +, +(9) +T π +P,cQc(s, a) = c(s, a) + γ +E +ˆs′∼ˆps,a +� +V π +c (gc +s,a(ˆs′)) +� +, (10) +where gr +s,a : S → S is a minimizer of +min +g∈G +E +ˆs′∼ˆps,a +[V π +r (g(ˆs′))] +s.t. +E +ˆs′∼ˆps,a +[d(ˆs′, g(ˆs′))] ≤ ϵs,a, +(11) +and gc +s,a : S → S is a maximizer of +max +g∈G +E +ˆs′∼ˆps,a +[V π +c (g(ˆs′))] +s.t. +E +ˆs′∼ˆps,a +[d(ˆs′, g(ˆs′))] ≤ ϵs,a, +(12) +for a given state-action pair (s, a) ∈ S × A. +Proof. First, we show that the dual problems to (11) and +(12) appear in the right-hand side of (7) and (8), repectively. +Next, we use Assumption 5.2 to show that strong duality +holds for these pairs of primal-dual problems. See the Ap- +pendix for details. +Theorem 5.4 demonstrates that we can calculate the Bell- +man operators T π +P,r and T π +P,c by using samples collected +from a nominal environment with transition distributions +ˆps,a, and adversarially perturbing the next state samples +according to (11) and (12), respectively. We refer to the +resulting changes in state transitions as Optimal Transport +Perturbations (OTP). As a result, we have replaced difficult +optimization problems over distribution space in (5) and (6) +with the tractable problems of computing Optimal Transport +Perturbations in state space. Theorem 5.4 represents the +main theoretical contribution of our work, which directly +motivates an efficient deep RL implementation of robust +and safe RL. +Finally, note that these perturbed state transitions are only +used to calculate the Bellman targets in (9) and (10) for +training the robust Q functions Qπ +P,r(s, a) and Qπ +P,c(s, a). +Therefore, unlike other adversarial approaches to robust +RL (Pinto et al., 2017; Tessler et al., 2019a; Vinitsky et al., +s +T π +P,r +T π +P,c +ˆs′ +gr +s,a(ˆs′) +gc +s,a(ˆs′) +ϵs,a +Figure 1. Illustration of Optimal Transport Perturbations from The- +orem 5.4 for a given next state sample ˆs′ ∼ ˆps,a in the nominal +environment. The black arrow denotes the state transition observed +in the nominal environment, and the shaded area denotes the feasi- +ble set in S from (11) and (12). Theorem 5.4 calculates separate +next state perturbations for the robust reward Bellman operator +(shown in blue) and the robust cost Bellman operator (shown in +orange). The dashed arrows denote imagined transitions used only +to calculate Bellman operators. +2020), we do not need to apply our Optimal Transport Per- +turbations during trajectory rollouts. Instead, these pertur- +bations are applied in a completely offline fashion. See +Figure 1 for an illustration. +6. 
Perturbation Networks for Deep Reinforcement Learning

From Theorem 5.4, we can calculate Bellman targets for our robust Q functions $Q^\pi_{\mathcal{P},r}(s,a)$ and $Q^\pi_{\mathcal{P},c}(s,a)$ by considering adversarially perturbed versions of next states sampled from $\hat{p}_{s,a}$. We can construct these adversarial perturbations by solving (11) and (12), respectively. Note that the perturbation functions $g^r_{s,a}, g^c_{s,a} \in \mathcal{G}$ from Theorem 5.4 differ across state-action pairs. We can represent the collection of perturbation functions at every state-action pair by considering the perturbation functions $g^r, g^c : \mathcal{S}\times\mathcal{A}\times\mathcal{S} \to \mathcal{S}$, which take the state-action pair $(s,a)$ as input for context in addition to the next state $\hat{s}'$ to be perturbed. We let $\mathcal{F}$ be the set of all functions from $\mathcal{S}\times\mathcal{A}\times\mathcal{S}$ to $\mathcal{S}$, with $g^r, g^c \in \mathcal{F}$.

In order to efficiently calculate the perturbation functions from Theorem 5.4 in a deep RL setting, we consider the optimization problems
$$g^r \in \operatorname*{arg\,min}_{g \in \mathcal{F}_\delta} \; \mathbb{E}_{(s,a,\hat{s}')\sim\mathcal{D}}\big[ V^\pi_r\big(g(s,a,\hat{s}')\big) \big] \quad \text{s.t.} \quad \mathbb{E}_{(s,a,\hat{s}')\sim\mathcal{D}}\big[ d\big(\hat{s}', g(s,a,\hat{s}')\big) \big] \le \epsilon, \tag{13}$$
and
$$g^c \in \operatorname*{arg\,max}_{g \in \mathcal{F}_\delta} \; \mathbb{E}_{(s,a,\hat{s}')\sim\mathcal{D}}\big[ V^\pi_c\big(g(s,a,\hat{s}')\big) \big] \quad \text{s.t.} \quad \mathbb{E}_{(s,a,\hat{s}')\sim\mathcal{D}}\big[ d\big(\hat{s}', g(s,a,\hat{s}')\big) \big] \le \epsilon, \tag{14}$$
where $(s,a,\hat{s}')\sim\mathcal{D}$ are transitions collected in the nominal environment and $\mathcal{F}_\delta \subseteq \mathcal{F}$ represents a class of parameterized perturbation functions. The average constraints in (13) and (14) effectively allow $\epsilon_{s,a}$ to differ across state-action pairs while being no greater than $\epsilon$ on average.

In the context of deep RL, we consider perturbation functions parameterized by a neural network $\delta : \mathcal{S}\times\mathcal{A}\times\mathcal{S} \to \mathcal{S}$. In our experiments, we consider tasks where $\mathcal{S} = \mathbb{R}^n$ and we apply multiplicative perturbations to state transitions. In particular, we consider perturbation functions of the form
$$g(s,a,\hat{s}') = s + (\hat{s}' - s)\big(1 + \delta(s,a,\hat{s}')\big), \tag{15}$$
where $\delta(s,a,\hat{s}') \in \mathbb{R}^n$ and all operations are performed coordinate-wise. By defining $\mathcal{F}_\delta$ in this way, we obtain plausible adversarial transitions that are interpretable, where $\delta(s,a,\hat{s}')$ represents the percentage change to the nominal state transition in each coordinate. In practice, we directly constrain the average magnitude of $\delta(s,a,\hat{s}')$ by $\epsilon_\delta$, which can be interpreted as setting $\epsilon_{s,a}$ to be a percentage of the average state transition magnitude at every state-action pair. We train separate reward and cost perturbation networks $\delta_r$ and $\delta_c$, and we apply the resulting Optimal Transport Perturbations to calculate Bellman targets for training the robust Q functions $Q^\pi_{\mathcal{P},r}(s,a)$ and $Q^\pi_{\mathcal{P},c}(s,a)$.
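To make the parameterization (15) and the constrained problems (13) and (14) concrete, the sketch below shows one way the cost perturbation network δc and the perturbed Bellman target from (10) could be written in PyTorch. It is an illustrative sketch, not the implementation released with this paper: the hidden layer sizes, the tanh output scaled by a bound delta_max, and the soft penalty standing in for the average constraint in (14) are all assumptions made here for readability.

```python
import torch
import torch.nn as nn


class PerturbationNetwork(nn.Module):
    """delta(s, a, s_hat') from Eq. (15): a coordinate-wise multiplicative
    perturbation of the observed state transition. Hidden sizes and the
    tanh squashing to [-delta_max, delta_max] are illustrative assumptions."""

    def __init__(self, state_dim, action_dim, hidden=256, delta_max=0.1):
        super().__init__()
        self.delta_max = delta_max
        self.net = nn.Sequential(
            nn.Linear(2 * state_dim + action_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, state_dim), nn.Tanh(),
        )

    def forward(self, s, a, s_next):
        delta = self.delta_max * self.net(torch.cat([s, a, s_next], dim=-1))
        # Eq. (15): g(s, a, s_hat') = s + (s_hat' - s) * (1 + delta), coordinate-wise.
        return s + (s_next - s) * (1.0 + delta), delta


def cost_perturbation_loss(delta_c, critic_c, policy, s, a, s_next,
                           eps_delta=0.02, penalty=10.0):
    """Loss for one gradient step on delta_c, following Eq. (14): increase the
    cost value of the perturbed next state while keeping the average
    per-coordinate perturbation magnitude near eps_delta (a soft penalty is
    used here as a stand-in for the average constraint)."""
    s_pert, delta = delta_c(s, a, s_next)
    a_pert = policy(s_pert)                    # a' ~ pi( . | g(s, a, s_hat'))
    v_c = critic_c(s_pert, a_pert).mean()      # estimate of E[V_c(g(s, a, s_hat'))]
    budget_gap = torch.relu(delta.abs().mean() - eps_delta)
    return -v_c + penalty * budget_gap


def perturbed_cost_target(delta_c, target_critic_c, policy, c, s, a, s_next,
                          gamma=0.99):
    """Robust cost Bellman target from Eq. (10), using the trained delta_c."""
    with torch.no_grad():
        s_pert, _ = delta_c(s, a, s_next)
        a_pert = policy(s_pert)
        return c + gamma * target_critic_c(s_pert, a_pert)
```

The reward perturbation network δr would be trained in the same way with the sign of the value term flipped, matching (13), and the soft penalty could equally be replaced by a projection or a Lagrangian treatment of the average constraint.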
7. Algorithm

We summarize our approach to robust and safe RL in Algorithm 1. At every update, we sample previously collected data from a replay buffer D. We update our reward and cost perturbation networks δr and δc according to (13) and (14), respectively. Then, we estimate Bellman targets according to (9) and (10), which we use to update our critics via standard TD learning loss functions. Finally, we use these critic estimates to update our policy according to (4).

Algorithm 1 Safe RL with Optimal Transport Perturbations
Input: policy π, critics Qr, Qc, OTP networks δr, δc
for k = 0, 1, 2, . . . do
    Collect data τ ∼ (π, p̂) and store it in D
    for K updates do
        Sample a batch of data (s, a, r, c, ŝ′) ∼ D
        Update δr and δc according to (13) and (14)
        Estimate the Bellman operators in (9) and (10)
        Update critics Qr, Qc to minimize TD losses
        Update policy π according to (4)
    end for
end for

Compared to standard safe RL methods, the only additional components of our approach are the perturbation networks used to apply Optimal Transport Perturbations, which we train alongside the critics and the policy using standard gradient-based methods. Otherwise, the computations for updating the critics and policy remain unchanged. Therefore, it is simple to incorporate our Optimal Transport Perturbation method into existing deep safe RL algorithms in order to provide robustness guarantees on performance and safety.

8. Experiments

We analyze the use of Optimal Transport Perturbations for robust and safe RL on continuous control tasks with safety constraints in the Real-World RL Suite (Dulac-Arnold et al., 2020; 2021). We follow the same experimental design used in Queeney & Benosman (2023). In particular, we consider 5 constrained tasks over 3 domains (Cartpole Swingup, Walker Walk, Walker Run, Quadruped Walk, and Quadruped Run), which all have horizons of 1,000 with r(s, a) ∈ [0, 1] and c(s, a) ∈ {0, 1}. In all tasks, we consider a safety budget of B = 100. We train policies in a nominal training environment for 1 million steps over 5 random seeds, and we evaluate the robustness of the learned policies in terms of both performance and safety across a range of perturbed test environments. See the Appendix for details on the safety constraints and environment perturbations considered for each task.

Table 1. Summary of performance across all tasks and environment perturbations. "% Safe" denotes the percentage of policies that satisfy the safety constraint across all tasks and environment perturbations. Total rewards and costs are normalized relative to the average performance of CRPO for each task and environment perturbation.

Algorithm             % Safe   Norm. Ave. Reward   Norm. Ave. Cost
CRPO                    51%          1.00                1.00
OTP                     87%          1.06                0.34
PR-MDP (5%)             82%          1.05                0.48
PR-MDP (10%)            88%          0.95                0.28
Domain Rand.            76%          1.14                0.72
Domain Rand. (OOD)      55%          1.02                1.02

[Figure 2: per-task results across perturbed test environments, with total reward (top row) and total cost (bottom row) plotted against pole length for Cartpole Swingup, torso length for Walker Walk and Walker Run, and torso density for Quadruped Walk and Quadruped Run, comparing CRPO, OTP, and PR-MDP (10%).] Figure 2. Comparison of algorithms across tasks and environment perturbations. Performance of PR-MDP is evaluated without adversarial interventions. Shading denotes half of one standard error across policies. Vertical dotted lines represent the nominal training environment. Top: Total reward. Bottom: Total cost, where horizontal dotted lines represent the safety budget and values below these lines represent safety constraint satisfaction.

In our experiments, we consider the safe RL algorithm Constraint-Rectified Policy Optimization (CRPO) (Xu et al., 2021), which immediately switches between maximizing the objective and minimizing the constraint for better constraint satisfaction compared to Lagrangian-based approaches. We use the unconstrained deep RL algorithm Maximum a Posteriori Policy Optimization (MPO) (Abdolmaleki et al., 2018) to calculate policy updates in CRPO.
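As a rough sketch of this switching rule, a simplified stand-in based only on the description above rather than CRPO's exact published update or the code released with this paper, and with the MPO improvement step replaced by a plain policy loss, the choice of objective at each update could look as follows, where budget corresponds to B and tol is a hypothetical slack parameter:

```python
def crpo_style_policy_loss(policy, q_r, q_c, states, budget, tol=0.0):
    """Return the scalar loss to minimize at this policy update, following
    the switching idea described above: if the sampled estimate of the
    constraint exceeds the budget (plus a hypothetical slack `tol`), descend
    on the expected cost value; otherwise ascend on the expected reward
    value. `policy`, `q_r`, and `q_c` are assumed to be callables mapping
    PyTorch tensors to tensors."""
    actions = policy(states)                        # a ~ pi( . | s)
    expected_reward = q_r(states, actions).mean()   # E_D[ E_pi[ Q_r(s, a) ] ]
    expected_cost = q_c(states, actions).mean()     # E_D[ E_pi[ Q_c(s, a) ] ]
    if expected_cost.item() > budget + tol:
        return expected_cost      # constraint violated: minimize cost value
    return -expected_reward       # otherwise: maximize reward value
```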
We consider a multivariate Gaussian policy, where the mean and diagonal covariance at a given state are parameterized by a neural network. We also consider separate neural network parameterizations for the reward and cost critics. See the Appendix for additional implementation details, including network architectures and values of all hyperparameters. Code is publicly available at https://github.com/jqueeney/robust-safe-rl.

We incorporate robustness into this baseline safe RL algorithm in three ways: (i) Optimal Transport Perturbations, (ii) adversarial RL using the action-robust PR-MDP framework from Tessler et al. (2019a) applied to the safety constraint, and (iii) the soft-robust approach of domain randomization (Peng et al., 2018; Derman et al., 2018). For our Optimal Transport Perturbations, we consider the perturbation structure in (15), where δr and δc are neural networks. We constrain the average per-coordinate magnitude of these perturbation networks to be less than εδ = 0.02 (i.e., 2% perturbations on average).

Figure 2 shows the total rewards and costs obtained by our OTP algorithm for each task across a range of perturbed test environments, compared to CRPO and PR-MDP. The performance of all algorithms averaged across tasks and test environments is summarized in Table 1.

8.1. Comparison to Safe RL

By applying Optimal Transport Perturbations to the objective and constraint in safe RL, we achieve meaningful test-time improvements compared to the standard non-robust version of CRPO. While in most cases we observe a decrease in total rewards in the nominal environment in order to achieve robustness, as expected, on average our framework leads to an increase in total rewards of 1.06x relative to CRPO across the range of test environments. Most importantly, we see a significant improvement in safety, with our algorithm satisfying constraints in 87% of test cases (compared to 51% for CRPO) and incurring 0.34x the costs of CRPO, on average. Note that we achieve this robustness while collecting data from the same training environment considered in CRPO, without requiring adversarial interventions in the environment or domain knowledge on the structure of the perturbed test environments.

8.2. Comparison to Adversarial RL

Next, we compare our approach to the PR-MDP framework (Tessler et al., 2019a), an adversarial RL method that randomly applies adversarial actions a percentage of the time during training. In order to apply this method to the safe RL setting, we train the adversary to maximize costs. We apply the default probability of intervention of 10% considered in Tessler et al. (2019a). As shown in Figure 2, this adversarial approach leads to robust constraint satisfaction across test environments (88% of the time compared to 87% for our OTP framework), especially in the Quadruped environments. Our OTP framework, on the other hand, leads to improved constraint satisfaction in the remaining 3 tasks.

However, the robust safety demonstrated by the PR-MDP approach also results in lower total rewards on average, and is the only robust approach in Table 1 that underperforms CRPO in terms of reward. In order to improve performance with respect to reward, we also considered PR-MDP with a lower probability of intervention of 5%. We include this version of PR-MDP in Table 1, and detailed results across tasks can be found in Figure 4 of the Appendix.
While this less adversarial implementation of PR-MDP is comparable to our OTP framework in terms of total rewards, it leads to a decrease in safety constraint satisfaction to 82%. Therefore, our OTP formulation demonstrates the safety benefits of the more adversarial setting and the reward benefits of the less adversarial setting.

[Figure 3: total training cost in the nominal environment for Quadruped Walk and Quadruped Run, comparing CRPO, OTP, and PR-MDP (10%).] Figure 3. Comparison of average final training cost in the nominal training environment. Training cost of PR-MDP includes impact of adversarial interventions. Horizontal dotted line represents safety budget.

In addition, an important drawback of adversarial RL is that it requires the intervention of an adversary in the training environment. Therefore, in order to achieve robust safety at deployment time, the PR-MDP approach incurs additional cost during training due to the presence of an adversary. Even in the Quadruped domains where PR-MDP results in near-zero cost at deployment time, Figure 3 shows that this algorithm leads to the highest total cost during training due to adversarial interventions. In many real-world situations, this additional cost during training is undesirable. Our OTP framework, on the other hand, achieves the lowest total cost during training, while also resulting in robust safety when deployed in perturbed environments. This is due to the fact that our Optimal Transport Perturbations are applied in a completely offline fashion.

8.3. Comparison to Domain Randomization

Finally, we compare our OTP framework to the soft-robust approach of domain randomization (Peng et al., 2018; Derman et al., 2018), which assumes access to a range of environments during training through the use of a simulator. We consider the same training distributions for domain randomization as in Queeney & Benosman (2023). By training across a range of environments, domain randomization achieves strong performance across test cases in terms of reward (1.14x compared to CRPO, on average), which was the motivation for its development in the setting of sim-to-real transfer. However, note that domain randomization was originally proposed for the unconstrained setting, and we observe that it does not consistently satisfy safety constraints outside of its range of training environments (see Figure 5 in the Appendix). This is likely due to its soft-robust approach that focuses on average performance across the training distribution. Domain randomization satisfies safety constraints in 76% of test cases, which is lower than both OTP and PR-MDP, which explicitly consider robust formulations.

It is also important to note that domain randomization not only requires access to a range of training environments, it also requires prior knowledge on the structure of potential disturbances in order to define its training distribution. In order to evaluate the case where we lack domain knowledge, we include an out-of-distribution (OOD) version of domain randomization in Table 1 that is trained on a distribution over a different parameter than the one varied in our perturbed test environments. See Figure 5 in the Appendix for detailed results across tasks. When the training distribution is not appropriately selected, we see that domain randomization provides little benefit compared to standard non-robust safe RL.
Our OTP framework, on the other hand, guaran- +tees robust and safe performance under general forms of +environment uncertainty while only collecting data from a +single training environment. +9. Conclusion +In this work, we have developed a general, efficient frame- +work for robust and safe RL. Through the use of optimal +transport theory, we demonstrated that we can guarantee +robustness to general forms of environment disturbances by +applying adversarial perturbations to observed state transi- +tions. These Optimal Transport Perturbations can be effi- +ciently implemented in an offline fashion using only data +collected from a nominal training environment, and can be +easily combined with existing techniques for safe RL to +provide protection against unknown disturbances. +Because our framework makes limited assumptions on the +data collection process during training and does not require +directly modifying the environment, it should be compatible +with many real-world decision making applications. As a +result, we hope that our work represents a promising step +towards trustworthy deep RL algorithms that can be reliably +deployed to improve real-world decision making. +Acknowledgements +This research was partially supported by the NSF under +grants CCF-2200052, CNS-1645681, CNS-2149511, DMS- +1664644, ECCS-1931600, and IIS-1914792, by the ONR +under grants N00014-19-1-2571 and N00014-21-1-2844, by +the NIH under grants R01 GM135930 and UL54 TR004130, +by AFOSR under grant FA9550-19-1-0158, by ARPA-E +under grant DE-AR0001282, by the MathWorks, and by +the Boston University Kilachand Fund for Integrated Life +Science and Engineering. + +Optimal Transport Perturbations for Safe Reinforcement Learning with Robustness Guarantees +References +Abdolmaleki, A., Springenberg, J. T., Tassa, Y., Munos, R., +Heess, N., and Riedmiller, M. Maximum a posteriori +policy optimisation. In Sixth International Conference on +Learning Representations, 2018. +Abdolmaleki, A., Huang, S., Hasenclever, L., Neunert, M., +Song, F., Zambelli, M., Martins, M., Heess, N., Hadsell, +R., and Riedmiller, M. A distributional view on multi- +objective policy optimization. In Proceedings of the 37th +International Conference on Machine Learning, volume +119, pp. 11–22. PMLR, 2020. +Abdullah, M. A., Ren, H., Ammar, H. B., Milenkovic, +V., Luo, R., Zhang, M., and Wang, J. +Wasserstein +robust reinforcement learning. +arXiv preprint, 2019. +arXiv:1907.13196. +Achiam, J., Held, D., Tamar, A., and Abbeel, P. Constrained +policy optimization. In Proceedings of the 34th Interna- +tional Conference on Machine Learning, volume 70, pp. +22–31. PMLR, 2017. +Altman, E. Constrained Markov Decision Processes. CRC +Press, 1999. +Bharadhwaj, H., Kumar, A., Rhinehart, N., Levine, S., +Shkurti, F., and Garg, A. Conservative safety critics +for exploration. In Ninth International Conference on +Learning Representations, 2021. +Blanchet, J. and Murthy, K. +Quantifying distributional +model risk via optimal transport. Mathematics of Opera- +tions Research, 44(2):565–600, 2019. doi: 10.1287/moor. +2018.0936. +Cheng, R., Orosz, G., Murray, R. M., and Burdick, J. W. +End-to-end safe reinforcement learning through barrier +functions for safety-critical continuous control tasks. In +Proceedings of the AAAI Conference on Artificial Intelli- +gence, volume 33, pp. 3387–3395. AAAI Press, 2019. +Dalal, G., Dvijotham, K., Vecerik, M., Hester, T., Paduraru, +C., and Tassa, Y. Safe exploration in continuous action +spaces. arXiv preprint, 2018. 
arXiv:1801.08757. +Derman, E., Mankowitz, D. J., Mann, T. A., and Mannor, S. +Soft-robust actor-critic policy-gradient. arXiv preprint, +2018. arXiv:1803.04848. +Dulac-Arnold, G., Levine, N., Mankowitz, D. J., Li, J., +Paduraru, C., Gowal, S., and Hester, T. An empirical in- +vestigation of the challenges of real-world reinforcement +learning. arXiv preprint, 2020. arXiv:2003.11881. +Dulac-Arnold, G., Levine, N., Mankowitz, D. J., Li, J., +Paduraru, C., Gowal, S., and Hester, T. Challenges of real- +world reinforcement learning: definitions, benchmarks +and analysis. Machine Learning, 110:2419–2468, 2021. +doi: 10.1007/s10994-021-05961-4. +Emam, Y., Glotfelter, P., Kira, Z., and Egerstedt, M. +Safe model-based reinforcement learning using ro- +bust control barrier functions. +arXiv preprint, 2021. +arXiv:2110.05415. +Hoffman, M. W., Shahriari, B., Aslanides, J., Barth-Maron, +G., Momchev, N., Sinopalnikov, D., Sta´nczyk, P., Ramos, +S., Raichuk, A., Vincent, D., Hussenot, L., Dadashi, R., +Dulac-Arnold, G., Orsini, M., Jacq, A., Ferret, J., Vieil- +lard, N., Ghasemipour, S. K. S., Girgin, S., Pietquin, O., +Behbahani, F., Norman, T., Abdolmaleki, A., Cassirer, A., +Yang, F., Baumli, K., Henderson, S., Friesen, A., Haroun, +R., Novikov, A., Colmenarejo, S. G., Cabi, S., Gulcehre, +C., Paine, T. L., Srinivasan, S., Cowie, A., Wang, Z., Piot, +B., and de Freitas, N. Acme: A research framework for +distributed reinforcement learning. arXiv preprint, 2020. +arXiv:2006.00979. +Hou, L., Pang, L., Hong, X., Lan, Y., Ma, Z., and Yin, D. Ro- +bust reinforcement learning with Wasserstein constraint. +arXiv preprint, 2020. arXiv:2006.00945. +Iyengar, G. N. Robust dynamic programming. Mathematics +of Operations Research, 30(2):257–280, 2005. doi: 10. +1287/moor.1040.0129. +Kuang, Y., Lu, M., Wang, J., Zhou, Q., Li, B., and Li, H. +Learning robust policy against disturbance in transition +dynamics via state-conservative policy optimization. In +Proceedings of the AAAI Conference on Artificial Intelli- +gence, volume 36, pp. 7247–7254, 2022. +Liu, Z., Cen, Z., Isenbaev, V., Liu, W., Wu, S., Li, B., and +Zhao, D. Constrained variational policy optimization +for safe reinforcement learning. In Proceedings of the +39th International Conference on Machine Learning, pp. +13644–13668. PMLR, 2022. +Ma, H., Chen, J., Eben, S., Lin, Z., Guan, Y., Ren, Y., +and Zheng, S. Model-based constrained reinforcement +learning using generalized control barrier function. In +2021 IEEE/RSJ International Conference on Intelligent +Robots and Systems (IROS), pp. 4552–4559, 2021. doi: +10.1109/IROS51168.2021.9636468. +Mandlekar, A., Zhu, Y., Garg, A., Fei-Fei, L., and Savarese, +S. +Adversarially robust policy learning: Active con- +struction of physically-plausible perturbations. In 2017 +IEEE/RSJ International Conference on Intelligent Robots +and Systems (IROS), pp. 3932–3939, 2017. doi: 10.1109/ +IROS.2017.8206245. +Mankowitz, D. J., Levine, N., Jeong, R., Abdolmaleki, A., +Springenberg, J. T., Shi, Y., Kay, J., Hester, T., Mann, + +Optimal Transport Perturbations for Safe Reinforcement Learning with Robustness Guarantees +T., and Riedmiller, M. Robust reinforcement learning +for continuous control with model misspecification. In +Eighth International Conference on Learning Represen- +tations, 2020. +Mankowitz, D. J., Calian, D. A., Jeong, R., Paduraru, C., +Heess, N., Dathathri, S., Riedmiller, M., and Mann, T. +Robust constrained reinforcement learning for continuous +control with model misspecification. 
arXiv preprint, 2021. +arXiv:2010.10644. +Nilim, A. and Ghaoui, L. E. Robust control of Markov +decision processes with uncertain transition matrices. Op- +erations Research, 53(5):780–798, 2005. doi: 10.1287/ +opre.1050.0216. +Paternain, S., Chamon, L., Calvo-Fullana, M., and Ribeiro, +A. Constrained reinforcement learning has zero dual- +ity gap. In Advances in Neural Information Processing +Systems, volume 32. Curran Associates, Inc., 2019. +Peng, X. B., Andrychowicz, M., Zaremba, W., and Abbeel, +P. Sim-to-real transfer of robotic control with dynam- +ics randomization. In 2018 IEEE International Confer- +ence on Robotics and Automation (ICRA), pp. 3803–3810, +2018. doi: 10.1109/ICRA.2018.8460528. +Pinto, L., Davidson, J., Sukthankar, R., and Gupta, A. Ro- +bust adversarial reinforcement learning. In Proceedings +of the 34th International Conference on Machine Learn- +ing, volume 70, pp. 2817–2826. PMLR, 2017. +Pydi, M. S. and Jog, V. Adversarial risk via optimal trans- +port and optimal couplings. In Proceedings of the 37th +International Conference on Machine Learning, volume +119, pp. 7814–7823. PMLR, 2020. +Queeney, J. and Benosman, M. Risk-averse model uncer- +tainty for distributionally robust safe reinforcement learn- +ing. arXiv preprint, 2023. arXiv:2301.12593. +Rajeswaran, A., Ghotra, S., Ravindran, B., and Levine, S. +EPOpt: Learning robust neural network policies using +model ensembles. In 5th International Conference on +Learning Representations, 2017. +Ray, A., Achiam, J., and Amodei, D. Benchmarking safe +exploration in in deep reinforcement learning, 2019. +Russel, R. H., Benosman, M., Van Baar, J., and Corcodel, R. +Lyapunov robust constrained-MDPs: Soft-constrained ro- +bustly stable policy optimization under model uncertainty. +arXiv preprint, 2021. arXiv:2108.02701. +Srinivasan, K., Eysenbach, B., Ha, S., Tan, J., and Finn, C. +Learning to be safe: Deep RL with a safety critic. arXiv +preprint, 2020. arXiv:2010.14603. +Stooke, A., Achiam, J., and Abbeel, P. Responsive safety +in reinforcement learning by PID Lagrangian methods. +In Proceedings of the 37th International Conference on +Machine Learning, volume 119, pp. 9133–9143. PMLR, +2020. +Tessler, C., Efroni, Y., and Mannor, S. Action robust rein- +forcement learning and applications in continuous control. +In Proceedings of the 36th International Conference on +Machine Learning, volume 97, pp. 6215–6224. PMLR, +2019a. +Tessler, C., Mankowitz, D. J., and Mannor, S. Reward +constrained policy optimization. In Seventh International +Conference on Learning Representations, 2019b. +Thananjeyan, B., Balakrishna, A., Nair, S., Luo, M., Srini- +vasan, K., Hwang, M., Gonzalez, J. E., Ibarz, J., Finn, +C., and Goldberg, K. Recovery RL: Safe reinforcement +learning with learned recovery zones. IEEE Robotics +and Automation Letters, 6(3):4915–4922, 2021. +doi: +10.1109/LRA.2021.3070252. +Tobin, J., Fong, R., Ray, A., Schneider, J., Zaremba, W., and +Abbeel, P. Domain randomization for transferring deep +neural networks from simulation to the real world. In +2017 IEEE/RSJ International Conference on Intelligent +Robots and Systems (IROS), pp. 23–30, 2017. doi: 10. +1109/IROS.2017.8202133. +Villani, C. Optimal transport, old and new. Springer, 2008. +doi: 10.1007/978-3-540-71050-9. +Vinitsky, E., Du, Y., Parvate, K., Jang, K., Abbeel, P., and +Bayen, A. Robust reinforcement learning using adversar- +ial populations. arXiv preprint, 2020. arXiv:2008.01825. +Wagener, N. C., Boots, B., and Cheng, C.-A. 
Safe reinforcement learning using advantage-based intervention. In Proceedings of the 38th International Conference on Machine Learning, volume 139, pp. 10630–10640. PMLR, 2021.
Xu, M., Liu, Z., Huang, P., Ding, W., Cen, Z., Li, B., and Zhao, D. Trustworthy reinforcement learning against intrinsic vulnerabilities: Robustness, safety, and generalizability. arXiv preprint, 2022. arXiv:2209.08025.
Xu, T., Liang, Y., and Lan, G. CRPO: A new approach for safe reinforcement learning with convergence guarantee. In Proceedings of the 38th International Conference on Machine Learning, pp. 11480–11491. PMLR, 2021.
Zhang, H., Chen, H., Xiao, C., Li, B., Liu, M., Boning, D., and Hsieh, C.-J. Robust deep reinforcement learning against adversarial perturbations on state observations. In Advances in Neural Information Processing Systems, volume 33, pp. 21024–21037. Curran Associates, Inc., 2020.

A. Proofs
Note that
\[ \inf_{p_{s,a} \in \mathcal{P}_{s,a}} \mathbb{E}_{s' \sim p_{s,a}}\big[V^\pi_r(s')\big] = - \sup_{p_{s,a} \in \mathcal{P}_{s,a}} \mathbb{E}_{s' \sim p_{s,a}}\big[-V^\pi_r(s')\big]. \]
Therefore, in this section we only prove results for the robust cost Bellman operator T^\pi_{P,c}. Results related to the robust reward Bellman operator T^\pi_{P,r} follow immediately by applying the same proofs after an appropriate change in signs.

A.1. Proof of Lemma 5.3
Proof. Under Assumption 5.1, note that Assumption (A1) and Assumption (A2) of Blanchet & Murthy (2019) are satisfied for the distributionally robust optimization problem in (6). Assumption (A1) is satisfied by our definition of optimal transport cost, and Assumption (A2) is satisfied by our Assumption 5.1. Then, according to Theorem 1 in Blanchet & Murthy (2019), optimal transport strong duality holds for the distributionally robust optimization problem in (6). Therefore, we have that
\[ \sup_{p_{s,a} \in \mathcal{P}_{s,a}} \mathbb{E}_{s' \sim p_{s,a}}\big[V^\pi_c(s')\big] = \inf_{\lambda \geq 0} \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\Big[ \sup_{s' \in S} V^\pi_c(s') - \lambda\big(d(\hat{s}', s') - \epsilon_{s,a}\big) \Big]. \]
By substituting this result into (6), we arrive at the result in (8).

A.2. Proof of Theorem 5.4
Proof. First, we write the dual problem to (12) as
\[ \inf_{\lambda \geq 0} \sup_{g \in G} \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\big[V^\pi_c(g(\hat{s}'))\big] - \lambda\Big( \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\big[d(\hat{s}', g(\hat{s}'))\big] - \epsilon_{s,a} \Big) = \inf_{\lambda \geq 0} \sup_{g \in G} \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\big[ V^\pi_c(g(\hat{s}')) - \lambda\big(d(\hat{s}', g(\hat{s}')) - \epsilon_{s,a}\big) \big]. \]
Using the definition of G, we can rewrite this as
\[ \inf_{\lambda \geq 0} \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\Big[ \sup_{s' \in S} V^\pi_c(s') - \lambda\big(d(\hat{s}', s') - \epsilon_{s,a}\big) \Big], \]  (16)
which appears in the right-hand side of (8) from Lemma 5.3. As shown in Lemma 5.3, (16) is also the dual to the distributionally robust optimization problem in (6), and optimal transport strong duality holds.
Next, we show that strong duality holds between (12) and (16). Let \lambda^* be the optimal dual variable in (16), and let
\[ g^*_{s,a}(\hat{s}') \in \arg\max_{s' \in S} \; V^\pi_c(s') - \lambda^*\big(d(\hat{s}', s') - \epsilon_{s,a}\big). \]
We have that \lambda^* and g^*_{s,a}(\hat{s}') exist according to Theorem 1(b) in Blanchet & Murthy (2019) along with Assumption 5.2, and g^*_{s,a} characterizes the optimal transport plan \nu^* that moves the probability of \hat{s}' under \hat{p}_{s,a} to g^*_{s,a}(\hat{s}'). By the complementary slackness results of Theorem 1(b) in Blanchet & Murthy (2019), we also have that
\[ \lambda^*\Big( \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\big[ d(\hat{s}', g^*_{s,a}(\hat{s}')) \big] - \epsilon_{s,a} \Big) = 0. \]
Therefore,
\[ \inf_{\lambda \geq 0} \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\Big[ \sup_{s' \in S} V^\pi_c(s') - \lambda\big(d(\hat{s}', s') - \epsilon_{s,a}\big) \Big] = \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\big[ V^\pi_c(g^*_{s,a}(\hat{s}')) - \lambda^*\big(d(\hat{s}', g^*_{s,a}(\hat{s}')) - \epsilon_{s,a}\big) \big] = \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\big[ V^\pi_c(g^*_{s,a}(\hat{s}')) \big].
Moreover, by the primal feasibility of the optimal transport plan \nu^*, we have that
\[ \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\big[ d(\hat{s}', g^*_{s,a}(\hat{s}')) \big] \leq \epsilon_{s,a}, \]
so g^*_{s,a} is a feasible solution to (12) with the same objective value as the value of (16). Therefore, strong duality holds between (12) and (16), and g^*_{s,a} is an optimal solution to (12) (i.e., an optimal solution to (12) exists). Then, for any optimal solution g^c_{s,a} to (12), we have that
\[ \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\big[ V^\pi_c(g^c_{s,a}(\hat{s}')) \big] = \mathbb{E}_{\hat{s}' \sim \hat{p}_{s,a}}\big[ V^\pi_c(g^*_{s,a}(\hat{s}')) \big], \]
and the right-hand side of (8) is equivalent to the right-hand side of (10).

B. Implementation Details
B.1. Safety Constraints and Environment Perturbations
We consider the same experimental design used in Queeney & Benosman (2023) to define our training and test environments. For each task, we consider a single safety constraint defined in the Real-World RL Suite, which we summarize in Table 2. A policy incurs cost in the Cartpole domain when the slider moves outside of a specified range, in the Walker domain when the velocity of any joint exceeds a threshold, and in the Quadruped domain when joint angles are outside of an acceptable range. The specific ranges that result in cost violations are determined by the safety coefficients in Table 2, which can take values in [0, 1] where lower values make cost violations more likely. See Dulac-Arnold et al. (2021) for detailed definitions of the safety constraints we consider.

Table 2. Safety constraints for all tasks.
Task               Safety constraint   Safety coefficient
Cartpole Swingup   Slider position     0.30
Walker Walk        Joint velocity      0.25
Walker Run         Joint velocity      0.30
Quadruped Walk     Joint angle         0.15
Quadruped Run      Joint angle         0.30

We evaluate the performance of learned policies across a range of test environments different from the training environment. We define these test environments by varying a simulator parameter in each domain across a range of values, which are listed in Table 3. We vary the length of the pole in the Cartpole domain, the length of the torso in the Walker domain, and the density of the torso in the Quadruped domain. Note that the parameter value associated with the nominal training environment is in the center of the range of parameter values considered at test time.

Table 3. Perturbation ranges for test environments across domains.
Domain      Perturbation parameter   Nominal value   Test range
Cartpole    Pole length              1.00            [0.75, 1.25]
Walker      Torso length             0.30            [0.10, 0.50]
Quadruped   Torso density            1,000           [500, 1,500]

Finally, note that the domain randomization baselines consider a range of environments during training. As summarized in Table 4, in-distribution domain randomization applies a uniform distribution over the middle 50% of the parameter values considered at test time. In the out-of-distribution variant of domain randomization, we instead consider a uniform distribution over a range of values for a different simulator parameter than the one varied at test time. See Table 4 for details.

B.2. Network Architectures
In our experiments, we consider neural network representations of the policy and critics. We consider networks with 3 hidden layers of 256 units and ELU activations, and we apply layer normalization followed by a tanh activation after the first layer as in Abdolmaleki et al. (2020).
We represent the policy as a multivariate Gaussian distribution with diagonal covariance, where at a given state the policy network outputs the mean µ(s) and diagonal covariance Σ(s) of the action distribution. The diagonal of Σ(s) is calculated by applying the softplus operator to the outputs of the neural network corresponding to the covariance. In addition to the policy network, we consider separate networks for the reward and cost critics. We maintain target versions of the policy and critic networks using an exponential moving average of the weights with τ = 5e-3. Finally, we also consider neural networks for our perturbation networks δr and δc. In this work, we consider small networks with 2 hidden layers of 64 units and ELU activations. We clip the outputs in the range [−2ϵδ, 2ϵδ] for additional stability.

Table 4. Perturbation parameters and ranges for domain randomization across domains.
In-distribution:
Domain      Perturbation parameter   Nominal value   Training range
Cartpole    Pole length              1.00            [0.875, 1.125]
Walker      Torso length             0.30            [0.20, 0.40]
Quadruped   Torso density            1,000           [750, 1,250]
Out-of-distribution:
Domain      Perturbation parameter   Nominal value   Training range
Cartpole    Pole mass                0.10            [0.05, 0.15]
Walker      Contact friction         0.70            [0.40, 1.00]
Quadruped   Contact friction         1.50            [1.00, 2.00]

Table 5. Network architectures and algorithm hyperparameters used in experiments.
General:
Batch size per update                           256
Updates per environment step                    1
Discount rate (γ)                               0.99
Target network exponential moving average (τ)   5e-3
Policy:
Layer sizes                                     256, 256, 256
Layer activations                               ELU
Layer norm + tanh on first layer                Yes
Initial standard deviation                      0.3
Learning rate                                   1e-4
Non-parametric KL (ϵKL)                         0.10
Action penalty KL                               1e-3
Action samples per update                       20
Parametric mean KL (βµ)                         0.01
Parametric covariance KL (βΣ)                   1e-5
Parametric KL dual learning rate                0.01
Critics:
Layer sizes                                     256, 256, 256
Layer activations                               ELU
Layer norm + tanh on first layer                Yes
Learning rate                                   1e-4

B.3. Algorithm Hyperparameters
All of the algorithms in our experiments build upon the baseline safe RL algorithm CRPO (Xu et al., 2021). At every update, CRPO calculates the current value of the safety constraint based on a batch of sampled data. If the safety constraint is satisfied for the current batch, it applies a policy update to maximize rewards. Otherwise, it applies a policy update to minimize costs. In both cases, we use the unconstrained RL algorithm MPO (Abdolmaleki et al., 2018) to calculate policy updates. MPO calculates a non-parametric target policy with KL divergence ϵKL from the current policy, and updates the current policy towards this target while constraining separate KL divergence contributions from the mean and covariance by βµ and βΣ, respectively. We apply per-dimension KL divergence constraints and action penalization using the multi-objective MPO framework (Abdolmaleki et al., 2020) as in Hoffman et al. (2020), and we consider closed-form updates of the temperature parameter used in the non-parametric target policy as in Liu et al. (2022) to account for the immediate switching between objectives in CRPO. See Table 5 for all important hyperparameter values associated with the implementation of policy updates using MPO, and see Abdolmaleki et al. (2018) for additional details.
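To make the update structure described above concrete, the following is a minimal PyTorch-style sketch of one CRPO-style update step with an Optimal Transport Perturbation applied to observed next states. It is an illustrative reconstruction under stated assumptions, not the authors' implementation: the callables `cost_value`, `reward_update`, and `cost_update` stand in for the cost critic and the MPO policy-improvement steps, and the perturbation-network input, optimizers, and names are our own.

```python
import torch

def otp_crpo_update(batch, policy, cost_value, reward_update, cost_update,
                    delta_c, delta_opt, log_lambda, lambda_opt,
                    safety_budget, eps_delta=0.02):
    """One illustrative CRPO update with a cost-side Optimal Transport Perturbation."""
    s, a, s_next = batch["state"], batch["action"], batch["next_state"]

    # CRPO switching rule: improve reward while the sampled constraint estimate is
    # within budget, otherwise update the policy to reduce cost.
    if cost_value(s).mean().item() <= safety_budget:
        reward_update(policy, batch)
    else:
        cost_update(policy, batch)

    # Adversarial update of the perturbation network: push the perturbed next state
    # toward higher cost value, with a Lagrangian penalty keeping the average
    # per-coordinate perturbation magnitude near eps_delta.
    delta = delta_c(torch.cat([s, a, s_next], dim=-1))
    lam = log_lambda.exp()
    magnitude = delta.abs().mean()
    delta_loss = -(cost_value(s_next + delta).mean() - lam.detach() * (magnitude - eps_delta))
    delta_opt.zero_grad()
    delta_loss.backward()
    delta_opt.step()

    # Dual ascent on the magnitude constraint (lambda grows when the constraint is violated).
    lambda_loss = log_lambda.exp() * (eps_delta - magnitude.detach())
    lambda_opt.zero_grad()
    lambda_loss.backward()
    lambda_opt.step()
```

A reward-side perturbation network δr would be treated the same way with the sign of the critic term flipped, since the worst case for rewards is a decrease rather than an increase.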
Table 6. Network architectures and hyperparameters for Optimal Transport Perturbations.
Layer sizes                                   64, 64
Layer activations                             ELU
Layer norm + tanh on first layer              No
Output clipping                               [−2ϵδ, 2ϵδ]
Learning rate                                 1e-4
Dual learning rate                            0.01
Per-coordinate perturbation magnitude (ϵδ)    0.02

[Figure 4: total reward (top row) and total cost (bottom row) for Cartpole Swingup, Walker Walk, Walker Run, Quadruped Walk, and Quadruped Run, plotted against pole length, torso length, and torso density for CRPO, OTP, PR-MDP (5%), and PR-MDP (10%).]
Figure 4. Comparison with adversarial RL. Performance of PR-MDP is evaluated without adversarial interventions. Shading denotes half of one standard error across policies. Vertical dotted lines represent nominal training environment. Top: Total reward. Bottom: Total cost, where horizontal dotted lines represent the safety budget and values below these lines represent safety constraint satisfaction.

For our OTP framework, we update the perturbation networks alongside the policy and critics. In particular, we consider a constraint on the average per-coordinate magnitude of the outputs of δr and δc determined by the parameter ϵδ, and we apply gradient-based updates on the Lagrangian relaxations of (13) and (14) using this constraint. We also update the corresponding dual variables throughout training.
Finally, we also implement PR-MDP and domain randomization using CRPO with MPO policy updates. The adversary in PR-MDP is updated using MPO with the goal of maximizing costs. Using the default settings from Tessler et al. (2019a), we apply one adversary update for every 10 policy updates. Domain randomization considers the same updates as the CRPO baseline, but collects data from the range of training environments summarized in Table 4.

C. Detailed Experimental Results
In this section, we include detailed results across tasks and test environments for all algorithms listed in Table 1. Figure 4 shows the performance of both variations of the adversarial PR-MDP approach using different probabilities of adversary intervention. In general, the less adversarial PR-MDP implementation with 5% intervention probability achieves similar total reward to OTP across all tasks. While this implementation continues to generate strong, safe performance in the Quadruped domains, it does not lead to robust constraint satisfaction in other tasks such as Cartpole Swingup and Walker Run. Our OTP framework, on the other hand, results in consistent constraint satisfaction in these tasks.
Figure 5 shows the performance of domain randomization across tasks and environment perturbations. The grey shaded area represents the range of training distributions for the in-distribution implementation of domain randomization.
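The sweeps reported in Figures 4 and 5 can be reproduced schematically as follows. This is an illustrative sketch rather than the evaluation code used here: the environment constructor `make_env`, the step signature that also returns a per-step cost, and the episode count are all assumptions.

```python
import numpy as np

def evaluate_robustness(make_env, policy, param_values, episodes=10, safety_budget=100.0):
    """Sweep one simulator parameter across test values and aggregate reward and cost.

    make_env(value) is assumed to build the task with the perturbation parameter
    (e.g., pole length or torso density) set to value; policy(obs) returns an action.
    """
    results = []
    for value in param_values:
        env = make_env(value)
        rewards, costs = [], []
        for _ in range(episodes):
            obs, total_r, total_c, done = env.reset(), 0.0, 0.0, False
            while not done:
                obs, r, c, done = env.step(policy(obs))  # assumed to also return a cost signal
                total_r += r
                total_c += c
            rewards.append(total_r)
            costs.append(total_c)
        results.append({
            "param": value,
            "reward": float(np.mean(rewards)),
            "cost": float(np.mean(costs)),
            "constraint_satisfied": bool(np.mean(costs) <= safety_budget),
        })
    return results

# Example sweep mirroring Table 3 (Cartpole pole length):
# evaluate_robustness(make_cartpole, policy, np.linspace(0.75, 1.25, 11))
```

A policy is then read as robustly safe on the sweep when the aggregated cost stays below the safety budget at every tested parameter value, which is how the horizontal dotted lines in the figures are interpreted.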
We +see that domain randomization leads to strong, robust performance in terms of rewards across all test cases, as well as + +Optimal Transport Perturbations for Safe Reinforcement Learning with Robustness Guarantees +700 +800 +Total Reward +Cartpole Swingup +400 +600 +800 +1000 +Walker Walk +200 +400 +600 +Walker Run +600 +800 +1000 +Quadruped Walk +800 +850 +Quadruped Run +0.8 +0.9 +1.0 +1.1 +1.2 +Pole Length +0 +100 +200 +Total Cost +0.1 +0.2 +0.3 +0.4 +0.5 +Torso Length +0 +100 +200 +0.1 +0.2 +0.3 +0.4 +0.5 +Torso Length +0 +100 +200 +CRPO +OTP +Domain Rand. +Domain Rand. (OOD) +600 +800 +1000 +1200 +1400 +Torso Density +0 +100 +200 +600 +800 +1000 +1200 +1400 +Torso Density +0 +100 +200 +Figure 5. Comparison with domain randomization. Shading denotes half of one standard error across policies. Grey shaded area denotes +range of training distribution for in-distribution version of domain randomization. Vertical dotted lines represent nominal training +environment. Top: Total reward. Bottom: Total cost, where horizontal dotted lines represent the safety budget and values below these +lines represent safety constraint satisfaction. +improved constraint satisfaction in perturbed environments compared to CRPO. However, in tasks such as Walker Run and +Quadruped Run, domain randomization does not robustly satisfy safety constraints for test environments that were not seen +during training. This issue is amplified in the case of out-of-distribution domain randomization, which does not demonstrate +consistent robustness benefits compared to standard safe RL. In fact, it even leads to an increase in constraint-violating +test cases in Cartpole Swingup compared to CRPO. This demonstrates that training on multiple environments does not +necessarily lead to robust performance. Instead, domain knowledge is critical in order for domain randomization to work +well in practice. + diff --git a/mtFQT4oBgHgl3EQfpDZ2/content/tmp_files/load_file.txt b/mtFQT4oBgHgl3EQfpDZ2/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b8d729c81424c46219f6abc5d489e8c4d10a05e --- /dev/null +++ b/mtFQT4oBgHgl3EQfpDZ2/content/tmp_files/load_file.txt @@ -0,0 +1,1079 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf,len=1078 +page_content='Optimal Transport Perturbations for Safe Reinforcement Learning with Robustness Guarantees James Queeney 1 Erhan Can Ozcan 1 Ioannis Ch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' Paschalidis 1 2 Christos G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' Cassandras 1 2 Abstract Robustness and safety are critical for the trustwor- thy deployment of deep reinforcement learning in real-world decision making applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' In particular, we require algorithms that can guaran- tee robust, safe performance in the presence of general environment disturbances, while making limited assumptions on the data collection process during training.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' In this work, we propose a safe re- inforcement learning framework with robustness guarantees through the use of an optimal trans- port cost uncertainty set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' We provide an efficient, theoretically supported implementation based on Optimal Transport Perturbations, which can be applied in a completely offline fashion using only data collected in a nominal training environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' We demonstrate the robust, safe performance of our approach on a variety of continuous control tasks with safety constraints in the Real-World Reinforcement Learning Suite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' Introduction Deep reinforcement learning (RL) is a data-driven frame- work for sequential decision making that has demonstrated the ability to solve complex tasks, making it a promising ap- proach for improving real-world decision making.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' In order for deep RL to be trusted for deployment in real-world deci- sion making settings, however, robustness and safety are of the utmost importance (Xu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=', 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' As a result, tech- niques have been developed to incorporate both robustness and safety into deep RL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' Robust RL methods protect against worst-case environment transitions, while safe RL methods incorporate safety constraints into the training process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' In real-world applications, disturbances in the environment can take many forms that are difficult to model in advance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' 1Division of Systems Engineering, Boston University, Boston, MA, USA 2Department of Electrical and Computer Engineering, Boston University, Boston, MA, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mtFQT4oBgHgl3EQfpDZ2/content/2301.13375v1.pdf'} +page_content=' Correspondence to: James Queeney 0 such that the eigenvalues of cDµ are integers +for any critical point [µ] ∈ PVn, and if moreover [µ] ∈ S n, we show that the eigenvalues are necessarily +nonnegative (Thm. 4.1), which generalizes the nonnegative rationality from Lie algebras to symmetric +Leibniz algerbas (see [12, Thm 3.5]). 
Besides, we give a description of the extremal points of Fn : Ln → +R, proving that the minimum value is attained at semisimple Lie algebras (Thm. 4.6), while the maximum +value is attained at the direct sum of the two-dimensional non-Lie symmetric Leibniz algebra with the +abelian algebra (Thm. 4.9). Finally, for an arbitrary critical point [µ] of Fn : S n → R, we characterize +the structure of [µ] by virtue of the nonnegative rationality of Dµ (Thm. 4.10–Thm. 4.12). +In Section 5, we classify the critical points of Fn : S n → R with n = 2, 3, which shows that there exist +many critical points that are not Lie algebras. Moreover, we prove that every 2-dimensional symmetric +Leibniz algebra is isomorphic to a critical point of F2; and there exist 3-dimensional symmetric Leibniz +algebras which are not isomorphic to any critical point of F3. +Finally in Section 6, we collect some natural questions concerning the critical points of Fn : Ln → R. +2. Preliminaries +In this section, we recall some basic definitions and results of Leibniz algebras . The ambient field is +always assumed to be the complex number field C unless otherwise stated. + +THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS +3 +Definition 2.1 ([7, 18]). A vector space L over C with a bilinear operation L × L → L, denoted by +(x, y) �→ xy, is called a Leibniz algebra, if every left multiplication is a derivation, i.e., +x(yz) = (xy)z + y(xz) +(2.1) +for all x, y, z ∈ L. +Remark 2.2. Leibniz algebras are sometimes called left Leibniz algebras in the literature, and there +is a corresponding notion of right Leibniz algebra, i.e., an algebra with the property that every right +multiplication is a derivation. In some studies, the authors prefer to call a right Leibniz algebra a Leibniz +algebra. We point out that for our purpose, it actually does not matter which notion is used since the +opposite algebra of a left Leibniz algebra is a right Leibniz algebra and vice versa. +Following Mason and Yamskulna [20], we introduce the notion of the symmetric Leibniz algebra as +follows. +Definition 2.3 ([20]). An algebra is called a symmetric Leibniz algebra if it is at the same time a left and +a right Leibniz algebra, that is +x(yz) = (xy)z + y(xz), +(2.2) +(xy)z = (xz)y + x(yz), +(2.3) +for all x, y, z ∈ L. +Every Lie algebra is clearly a symmetric Leibniz algebra, and the converse is not true. In the following, +we make the convention that an ideal of a Leibniz algebra always means a two-side ideal. +Definition 2.4. Let L be a Leibniz algebra. L is called solvable if L(r) = 0 for some r ∈ N, where +L(0) = L, L(k+1) = L(k)L(k), k ≥ 0. +If I, J are any two solvable ideals of L, then I + J is also a solvable ideal of L, so the maximum +solvable ideal is unique, called the radical of g and denoted by Rad(L) ([7]). +Theorem 2.5 ([2]). A Leibniz algebra L over a field of characteristic 0 admits a Levi decomposition, i.e., +L = S + Rad(L) decomposes into the sum of a semisimple Lie subalgebra S and the radical satisfying +S ∩ Rad(L) = 0. +Definition 2.6. A Leibniz algebra L is called nilpotent if there exists a positive integer n such that any +product of n elements in L, no matter how associated, is zero. + +4 +ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG +For a Leibniz algebra, we define 1L := L, k+1L := L(kL), k ≥ 1. Furthermore, we define +L1 := L, +Lk = +k−1 +� +i=1 +LiLk−i, k ≥ 2. +Then we have the following theorem. +Theorem 2.7 ([7]). For any integer k ≥ 1, then kL = Lk. 
Moreover, L is nilpotent if and only if there +exists an positive integer n such that Ln = 0. +If I, J are two nilpotent ideals of a Leibniz algebra L, then I + J is also a nilpotent ideal of L, +consequently the maximum nilpotent ideal is unique, called the nilradical, denoted by N(L) ([7, 25]). +Proposition 2.8 ([25]). Let L be a Leibniz algebra over a field of characteristic zero, then LRad(L), +Rad(L)L ⊂ N(L). +3. The moment map for complex algebras +Let Cn be the n-dimensional complex vector space and Vn = ⊗2(Cn)∗ ⊗ Cn be the space of all complex +n-dimensional algebras. The natural action of GL(n) = GL(Cn) on Vn is given by +g.µ(X, Y) = gµ(g−1X, g−1Y), +g ∈ GL(n), X, Y ∈ Cn. +(3.1) +Clearly, GL(n).µ is precisely the isomorphism class of µ, and 0 lies in the boundary of GL(n).µ for any +µ ∈ Vn. By differentiating (3.1), we obtain the natural action gl(n) on Vn, i.e., +A.µ(X, Y) = Aµ(X, Y) − µ(AX, Y) − µ(X, AY), +A ∈ gl(n), µ ∈ Vn. +(3.2) +It follows that A.µ = 0 if and only if A ∈ Der(µ), the derivation algebra of µ. The usual Hermitian inner +product on Cn gives an U(n)-invariant Hermitian inner product on Vn as follows +⟨µ, λ⟩ = +� +i, j,k +⟨µ(Xi, X j), Xk⟩⟨λ(Xi, X j), Xk⟩, +µ, λ ∈ Vn, +(3.3) +where {X1, X2, · · · , Xn} is an arbitrary orthonormal basis of Cn. It is easy to see that gl(n) = u(n) + iu(n) +decomposes into skew-Hermitian and Hermitian transformations of Vn, respectively. Moreover, there is +an Ad(U(n))-invariant Hermitian inner product on gl(n) given by +(A, B) = tr AB∗, A, B ∈ gl(n). +(3.4) +The moment map from symplectic geometry, corresponding to the Hamiltonian action of U(n) on the +symplectic manifold PVn is defined as follows +m : PVn → iu(n), +(m([µ]), A) = (dρµ)eA +∥µ∥2 +, +0 � µ ∈ Vn, A ∈ iu(n), +(3.5) + +THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS +5 +where ρµ : GL(n) → R is given by ρµ(g) = ⟨g.µ, g.µ⟩. Clearly, (dρµ)eA = 2⟨A.µ, µ⟩ for A ∈ iu(n). The +square norm of the moment map is denoted by +Fn : PVn → R, +Fn([µ]) = ∥m([µ])∥2 = (m([µ]), m([µ])), +(3.6) +In order to express m([µ]) explicitly, we define Mµ ∈ iu(n) as follows +Mµ = 2 +� +i +Lµ +Xi(Lµ +Xi)∗ − 2 +� +i +(Lµ +Xi)∗Lµ +Xi − 2 +� +i +(Rµ +Xi)∗Rµ +Xi, +(3.7) +where the left and right multiplication Lµ +X, Rµ +X : Cn → Cn by X of the algebra µ, are given by Lµ +X(Y) = +µ(X, Y) and Rµ +X(Y) = µ(Y, X) for all Y ∈ Cn, respectively. It is not hard to prove that +⟨MµX, Y⟩ =2 +� +i, j +⟨µ(Xi, X j), X⟩⟨µ(Xi, X j), Y⟩ − 2 +� +i, j +⟨µ(Xi, X), X j⟩⟨µ(Xi, Y), X j⟩ +− 2 +� +i, j +⟨µ(X, Xi), X j⟩⟨µ(Y, Xi), X j⟩ +(3.8) +for X, Y ∈ Cn. Note that if the algebra µ is commutative or anticommutative, then the second and third +term of (3.8) are the same, and in this case, Mµ coincides with [12]. +Lemma 3.1. For any 0 � µ ∈ Vn, we have m([µ]) = +Mµ +∥µ∥2. In particular, (Mµ, A) = 2⟨A.µ, µ⟩ for any +A ∈ iu(n). +Proof. For any A ∈ iu(n), we have +(Mµ, A) = tr MµA∗ = tr MµA +and +tr MµA = 2 tr +� +i +Lµ +Xi(Lµ +Xi)∗A − 2 tr +� +i +((Lµ +Xi)∗Lµ +Xi + (Rµ +Xi)∗Rµ +Xi)A +=: I + II. +Note that +I =2 +� +i +tr Lµ +Xi(Lµ +Xi)∗A +=2 +� +i +tr(Lµ +Xi)∗ALµ +Xi +=2 +� +i, j +⟨(Lµ +Xi)∗ALµ +Xi(X j), X j⟩ +=2 +� +i, j +⟨Aµ(Xi, X j), µ(Xi, X j)⟩, + +6 +ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG +and +II = − 2 +� +i, j +⟨((Lµ +Xi)∗Lµ +Xi + (Rµ +Xi)∗Rµ +Xi)AX j, X j⟩ += − 2 +� +i, j +⟨µ(Xi, AX j), µ(Xi, X j)⟩ − 2 +� +i, j +⟨µ(AX j, Xi), µ(X j, Xi)⟩ += − 2 +� +i, j +⟨µ(AXi, X j) + µ(Xi, AX j), µ(Xi, X j)⟩. +By (3.2), it follows that +(Mµ, A) = 2⟨A.µ, µ⟩. +Since A ∈ iu(n), we have ⟨A.µ, µ⟩ = ⟨µ, A.µ⟩. The Lemma is completed by (3.5). 
+□ +Corollary 3.2. For any µ ∈ Vn, then +(i) tr MµD = 0 for any D ∈ Der(µ) ∩ iu(n); +(ii) tr Mµ[A, A∗] ≥ 0 for any A ∈ Der(µ), and equality holds if and only if A∗ ∈ Der(µ). +Proof. For (i), it follows from Lemma 3.1 and the fact that D is a Hermitian derivation of µ. For (ii), it +follows from that tr Mµ[A, A∗] = 2⟨A∗.µ, A∗.µ⟩ ≥ 0 for any A ∈ Der(µ), and the fact A∗.µ = 0 if and only +if A∗ ∈ Der(µ). +□ +Theorem 3.3. The moment map m : PVn → iu(n), the functional square norm of the moment map +Fn = ∥m∥2 : PVn → R and the gradient of Fn are, respectively, given by +Fn([µ]) = +tr M2 +µ +∥µ∥4 , +grad(Fn)[µ] = 8π∗(Mµ).µ +∥µ∥4 +, +[µ] ∈ PVn, +(3.9) +where π∗ denotes the derivative of π : Vn\{0} → PVn, the canonical projection. Moreover, the following +statements are equivalent: +(i) [µ] ∈ PVn is a critical point of Fn. +(ii) [µ] ∈ PVn is a critical point of Fn|GL(n).[µ]. +(iii) Mµ = cµI + Dµ for some cµ ∈ R and Dµ ∈ Der(µ). +Proof. By (3.6) and Lemma 3.1, we have Fn([µ]) = +tr M2 +µ +∥µ∥4 for any [µ] ∈ PVn. To prove the second one, we +only need to compute the gradient of Fn : Vn \ {0} → R, Fn(µ) = +tr M2 +µ +∥µ∥4 , and then to project it via π∗. If +µ, λ ∈ Vn with µ � 0, then +Re⟨grad(Fn)µ, λ⟩ = d +d +�����t=0 +Fn(µ + tλ) = d +d +�����t=0 +1 +∥µ + tλ∥4 (Mµ+tλ, Mµ+tλ) += − 4 Re⟨Fn(µ) +∥µ∥2 µ, λ⟩ + +2 +∥µ∥4 ( d +d +�����t=0 +Mµ+tλ, Mµ) + +THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS +7 +We claim that ( d +d +���t=0 Mµ+tλ, A) = 4 Re⟨A.µ, λ⟩ for any A ∈ iu(n). Indeed, by Lemma 3.1, we have +( d +d +�����t=0 +Mµ+tλ, A) = d +d +�����t=0 +(Mµ+tλ, A) = 2 d +d +�����t=0 +⟨A.(µ + tλ), µ + tλ⟩ = 2⟨A.λ, µ⟩ + 2⟨A.µ, λ⟩ = 4 Re⟨A.µ, λ⟩. +The claim is therefore proved. It follows that grad(Fn)µ = −4 Fn(µ) +∥µ∥2 µ + 8(Mµ).µ +∥µ∥4 , and consequentely +grad(Fn)[µ] = 8π∗(Mµ).µ +∥µ∥4 +. +So the first part of the theorem is proved, and the following is to prove the equivalence among the +statements (i), (ii) and (iii). +(i) ⇔ (ii) : The equivalence follows from that grad(Fn) is tangent to the GL(n)-orbits. Indeed +grad(Fn)[µ] = 8π∗(Mµ).µ +∥µ∥4 += +8 +∥µ∥4 π∗( d +d +�����t=0 +etMµ.µ) = +8 +∥µ∥4 +d +d +�����t=0 +etMµ.[µ] ∈ T[µ](GL(n).[µ]). +(iii) ⇒ (i) : By (3.2), we know that I.µ = −µ, and (Mµ).µ = (cµI + Dµ).µ = −cµµ. It follows that +grad(Fn)[µ] = 0. +(i) ⇒ (iii) : Since grad(Fn)[µ] = 0, then (Mµ).µ ∈ ker π∗µ = Cµ. So Mµ = cI + D for some c ∈ C +and D ∈ Der(µ). Clearly [D, D∗] = 0, we conclude by Corollary 3.2 that D∗ is also a derivation of µ. In +particular, (c − ¯c)I = D∗ − D ∈ Der(µ), thus c = ¯c ∈ R. +□ +In the frame of algebras, a result due to Ness can be stated as follows +Theorem 3.4 ([21]). If [µ] is a critical point of the functional Fn : PVn �→ R then +(i) Fn|GL(n).[µ] attains its minimum value at [µ]. +(ii) [λ] ∈ GL(n).[µ] is a critical point of Fn if and only if [λ] ∈ U(n).[µ]. +Lemma 3.5. Let [µ] ∈ PVn be a critical point of Fn with Mµ = cµI+Dµ for some cµ ∈ R and Dµ ∈ Der(µ). +Then we have +(i) cµ = +tr M2 +µ +tr Mµ = − 1 +2 +tr M2 +µ +∥µ∥2 < 0. +(ii) If tr Dµ � 0, then cµ = − +tr D2 +µ +tr Dµ and tr Dµ > 0. +Proof. Since Mµ = cµI + Dµ, by Lemma 3.1 and Corollary 3.2 we have +tr Mµ = (Mµ, I) = 2⟨µ, I.µ⟩ = −2∥µ∥2 < 0, +tr M2 +µ = tr Mµ(cµI + Dµ) = cµ tr Mµ. +So cµ = +tr M2 +µ +tr Mµ = − 1 +2 +tr M2 +µ +∥µ∥2 < 0. If tr Dµ � 0, then +0 = tr MµDµ = cµ tr Dµ + tr D2 +µ. +So cµ = − +tr D2 +µ +tr Dµ and tr Dµ > 0. +□ + +8 +ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG +Remark 3.6. In fact, tr Dµ = 0 if and only if Dµ = 0. Indeed, it follows from that 0 = cµ tr Dµ + tr D2 +µ +and Dµ is hermitian. +4. 
The critical points of the variety of Leibniz algebras +The spaces Ln, Sn of all n-dimensional Leibniz algebras and symmetric Leibniz algebras are alge- +braic sets since they are given by polynomial conditions. Denote by Ln and S n the projective algebraic +varieties obtained by projectivization of Ln and Sn, respectively. Then by Theorem 3.3, we know that +the critical points of Fn : Ln → R, and Fn : S n → R are precisely the critical points of Fn : PVn → R +which lie in Ln and S n, respectively. +4.1. The rationality and nonnegative property. The following rationality and nonnegative property +are generalizations of [12] from Lie algebras to Leibniz algebras and symmetric Leibniz algebras, re- +spectively. +Theorem 4.1. Let [µ] ∈ PVn be a critical point of Fn : PVn → R with Mµ = cµI + Dµ for some cµ ∈ R +and Dµ ∈ Der(µ). Then there exists a constant c > 0 such that the eigenvalues of cDµ are integers prime +to each other, say k1 < k2 < · · · < kr ∈ Z with multiplicities d1, d2, · · · , dr ∈ N. If moreover [µ] ∈ S n, +then the integers are nonnegative. +Proof. The case Dµ = 0 is trivial. In the following, we assume that Dµ is nonzero. Note that Dµ is +Hermitian, then we have the following orthogonal decomposition +Cn = l1 ⊕ l2 ⊕ · · · ⊕ lr, r ≥ 2 +where li := {X ∈ Cn|DµX = ciX} are the eigenspaces of Dµ corresponding to the eigenvalues c1 < c2 < +· · · < cr ∈ R, respectively. Set di = dim li ∈ N, 1 ≤ i ≤ r. Since Dµ is a derivation, we have the following +bracket relations +µ(li, lj) ⊂ lk +if ci + cj = ck, +for all 1 ≤ i, j, k ≤ r. Conversely, if we define a linear transformation A : Cn → Cn by A|li = aiIdli, +where a1, a2, · · · , ar ∈ R satisfying ai + aj = ak for all i, j, k such that ci + cj = ck, then A is a Hermitian +derivation of µ. Clearly, all such derivations form a real vector space, which can be identified with +W := {(w1, w2, · · · , wr) ∈ Rr|wi + w j = wk if ci + cj = ck, 1 ≤ i, j, k ≤ r}. We endow Rr with the usual +inner product, i.e., +⟨x, y⟩ = +� +i +xiyi, +(4.1) +for any x = (x1, x2, · · · , xr), y = (y1, y2, · · · , yr) ∈ Rr. + +THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS +9 +For any derivation A ∈ W, by Corollary 3.2 and Lemma 3.5, we have +0 = tr MµA = tr(cµI + Dµ)A = tr(Dµ − αI)A, +where α = +tr D2 +µ +tr Dµ = +c2 +1d1+c2 +2d2+···+c2 +r dr +c1d1+c2d2+···+crdr > 0. Then we see that (d1(c1 − α), d2(c2 − α), · · · , dr(cr − α)) ⊥ W +relative to (4.1). Put F := W⊥, then by definition it is easy to see that +F = span1≤i, j,k≤r{ei + ej − ek : ci + cj = ck}, +where ei belongs to Rr having 1 in the i-th position and 0 elsewhere. Let {ei1 +ej1 −ek1, · · · , eis +ejs −eks} +be a basis of F, then +(d1(c1 − α), d2(c2 − α), · · · , dr(cr − α)) = +s +� +p=1 +bp(eip + ejp − ekp), +(4.2) +for some b1, b2, · · · , bs ∈ R. Put +E = + +ei1 + ej1 − ek1 +ei2 + ej2 − ek2 +... +eis + ejs − eks + +∈ Zs×r, +then EET ∈ GL(s, Z), and (EET)−1 ∈ GL(s, Q). By (4.2) and the definition of E, we have + +d1(c1 − α) +d2(c2 − α) +... +dr(cr − α) + +r×1 += ET + +b1 +b2 +... +bs + +s×1 +, E + +c1 +c2 +... +cr + +r×1 += + +0 +0 +... +0 + +s×1 +, +E + +1 +1 +... +1 + +r×1 += + +1 +1 +... +1 + +s×1 +. +By the left multiplication of E on (4.2), we have + +0 +0 +... +0 + +s×1 +− α + +1 +1 +... 
+1 + +s×1 += ED−1ET + +b1 +b2 +... +bs + +s×1 +, +where D = diag(d1, d2, · · · , dr). It is easy to see that (ED−1ET) ∈ GL(s, Q). Consequently +D + +c1 − α +c2 − α +... +cr − α + +r×1 += −αET(ED−1ET)−1 + +1 +1 +... +1 + +s×1 +, +and +1 +α + +c1 +c2 +... +cr + +r×1 += + +1 +... +1 + +r×1 +− D−1ET(ED−1ET)−1 + +1 +1 +... +1 + +s×1 +∈ Qr. +So there exists a constant c > 0 such that the eigenvalues of cDµ are integers prime to each other. + +10 +ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG +If moreover [µ] ∈ S n, we claim that the integers are nonnegative. Indeed, assume that 0 � X ∈ Cn +satisfies DµX = c1X. Then we have +c1Lµ +X = [Dµ, Lµ +X], +c1Rµ +X = [Dµ, Rµ +X]. +It follows that +c1 tr Lµ +X(Lµ +X)∗ = tr[Dµ, Lµ +X](Lµ +X)∗ = tr[Mµ, Lµ +X](Lµ +X)∗ = tr Mµ[Lµ +X, (Lµ +X)∗]. +(4.3) +Similarly +c1 tr Rµ +X(Rµ +X)∗ = tr Mµ[Rµ +X, (Rµ +X)∗]. +(4.4) +Since Lµ +X, Rµ +X are derivations of µ, by Corollary 3.2 we have +c1 tr Lµ +X(Lµ +X)∗ ≥ 0 +and +c1 tr Rµ +X(Rµ +X)∗ ≥ 0. +If Lµ +X or Rµ +X is not zero, then c1 ≥ 0. If Lµ +X and Rµ +X are both zero, then X lies in the center of µ, and by +(3.8) +⟨MµX, X⟩ = 2 +� +i, j +|⟨µ(Xi, X j), X⟩|2 ≥ 0. +(4.5) +Since Mµ = cµI+Dµ, then 0 ≤ ⟨MµX, X⟩ = (cµ+c1)⟨X, X⟩. It follows from Lemma 3.5 that c1 ≥ −cµ > 0. +This completes the proof. +□ +Remark 4.2. Let [µ] be a critical point of Fn : S n → R with Mµ = cµI + Dµ for some cµ ∈ R and +Dµ ∈ Der(µ). If µ is nilpotent, then Dµ is positive definite. Consequently, all nilpotent critical points +of Fn : S n → R are N-graded. Indeed, assume that 0 � X ∈ Cn satisfies DµX = c1X, where c1 +is the smallest eigenvalue of Dµ. By Theorem 4.1, we know that c1 ≥ 0. Suppose that c1 = 0, then +tr Mµ[Lµ +X, (Lµ +X)∗] = 0, and tr Mµ[Rµ +X, (Rµ +X)∗] = 0. Using Corollary 3.2, (Lµ +X)∗ and (Rµ +X)∗ are derivations of +µ. Let l be the symmetric Leibniz algebra (Cn, µ). Consider the orthogonal decomposition of l +l = n1 ⊕ n2 ⊕ · · · ⊕ np, +where p ≥ 2, µ(l, l) = n2 ⊕ · · · ⊕ np, µ(l, µ(l, l)) = l3 ⊕ · · · ⊕ lp, · · · . Since Lµ +X and (Lµ +X)∗ are derivations +of µ, then (Lµ +X)∗ leaves each li invariant and Lµ +X(li) ⊂ li+1. So tr Lµ +X(Lµ +X)∗ = 0, and Lµ +X = 0. Similarly, one +concludes that Rµ +X = 0. That is, X lies in the center of l, which is a contradiction since in this case we +have c1 ≥ −cµ > 0. So Dµ is positive definite. + +THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS +11 +4.2. The minima and maxima of Fn : Ln → R. Following from [12], we introduce the notion of the +type of a critical point. +Definition 4.3. The data set (k1 < k2 < · · · < kr; d1, d2, · · · , dr) in Theorem 4.1 is called the type of the +critical point [µ]. +For any fixed dimension n, it follows from the finiteness of the partitions of n in the proof of Theo- +rem 4.1 that there are only finitely many types of critical points of Fn : PVn → R. +Proposition 4.4. Let [µ] ∈ PVn be a critical point of Fn with type α = (k1 < k2 < · · · < kr; d1, d2, · · · , dr). +Then we have +(i) If α = (0; n), then Fn([µ]) = 4 +n. +(ii) If α � (0; n), then Fn([µ]) = 4 +� +n − (k1d1+k2d2+···+krdr)2 +(k2 +1d1+k2 +2d2+···+k2r dr) +�−1 +. +Proof. We suppose that Mµ = cµI + Dµ, ∥µ∥ = 1. Since tr Mµ = −2⟨µ, µ⟩ = −2, then +tr M2 +µ = tr Mµ(cµI + Dµ) = cµ tr Mµ = −2cµ, +and +Fn([µ]) = tr Mµ2 +∥µ∥4 += tr Mµ2 = −2cµ. 
+For (i), we have Dµ = 0, so Mµ = cµI and cµn = tr Mµ = −2. Thus cµ = − 2 +n. Fn([µ]) = −2cµ = 4 +n. +For (ii), we have Dµ � 0, and cµ = − +tr D2 +µ +tr Dµ . Note that +Fn([µ]) = tr Mµ2 = tr(cµI + Dµ)2 = c2 +µn + cµ tr Dµ = 1 +4Fn([µ])2n − 1 +2Fn([µ]) tr Dµ, +so we have +1 +Fn([µ]) = 1 +4n − +1 +2Fn([µ]) tr(Dµ) = 1 +4n + 1 +4cµ +tr Dµ = 1 +4(n − (tr Dµ)2 +tr D2µ +). +It follows that Fn([µ]) = 4 +� +n − (k1d1+k2d2+···+krdr)2 +(k2 +1d1+k2 +2d2+···+k2r dr) +�−1 +. +□ +Lemma 4.5. Assume [µ] ∈ PVn, then [µ] is a critical point of Fn : PVn → R with type (0; n) if and only +if Fn([µ]) = 4 +n. Moreover, 4 +n is the minimum value of Fn : PVn → R. +Proof. For any 0 � µ ∈ Vn, we use x1, x2, · · · , xn ∈ R denote the eigenvalues of Mµ. Note that tr Mµ = +−2∥µ∥2, then we have +Fn([µ]) = tr Mµ2 +∥µ∥4 += 4 tr Mµ2 +(tr Mµ)2 = 4 +(x2 +1 + x2 +2 + · · · + x2 +n) +(x1 + x2 + · · · + xn)2 . +It is easy to see that Fn([µ]) ≥ 4 +n with equality holds if and only if x1 = x2 = · · · = xn. So [µ] is a critical +point of Fn : PVn → R with type (0; n) if only if Mµ is a constant multiple of I, if and only Fn attains its +minimum value 4 +n at [µ]. +□ + +12 +ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG +The following theorem shows that even in the frame of Leibniz algebras, the semisimple Lie algebras +are still the only critical points of Fn : Ln → R attaining the minimum value. +Theorem 4.6. Assume that there exists a semisimple Lie algebra of dimension n. Then Fn : Ln → R +attains its minimum value at a point [λ] ∈ GL(n).[µ] if and only if µ is a semisimple Lie algebra. In such +a case, Fn([λ]) = 4 +n. +Proof. Assume that µ is a complex semisimple Lie algebra. It follows from [12, Theorem 4.3] that +Fn : Ln → R attains its minimum value 4 +n at a point [λ] ∈ GL(n).[µ]. +Conversely, assume Fn : Ln → R attains its minimum value at a point [λ] ∈ GL(n).[µ]. Then +by hypothesis, there exists a semisimple Lie algebra of dimension n. The first part of the proof and +Lemma 4.5 imply that Mλ = cλI with cλ < 0. To prove µ is semisimple, it suffices to show that +l = (λ, Cn) is semisimple. Consider the following orthogonal decompositions: (i) l = h ⊕ s, where s +is the radical of λ; (ii) s = a ⊕ nλ, where nλ = λ(s, s) is a nilpotent ideal of l; (iii) nλ = v ⊕ zλ, where +zλ = {Z ∈ nλ : λ(Z, nλ) = λ(nλ, Z) = 0} is the center of nλ. Clearly, zλ is a ideal of l. We have +l = h ⊕ a ⊕ v ⊕ zλ. Suppose that zλ � 0. Let {Hi}, {Ai}, {Vi}, {Zi} be an orthonormal basis of h, a, v, and zλ, +respectively. Put {Xi} = {Hi} ∪ {Ai} ∪ {Vi} ∪ {Zi}. For any 0 � Z ∈ zλ, by hypothesis we have +0 > ⟨MλZ, Z⟩ =2 +� +ij +|⟨λ(Xi, X j), Z⟩|2 − 2 +� +ij +|⟨λ(Z, Xi), X j⟩|2 − 2 +� +ij +|⟨λ(Xi, Z), X j⟩|2 +=2 +� +ij +� +|⟨λ(Zi, H j), Z⟩|2 + |⟨λ(Hi, Z j), Z⟩|2 + |⟨λ(Zi, A j), Z⟩|2 + |⟨λ(Ai, Z j), Z⟩|2� ++ α(Z) +− 2 +� +ij +� +|⟨λ(Z, Hi), Z j⟩|2 + |⟨λ(Z, Ai), Z j⟩|2� +− 2 +� +ij +� +|⟨λ(Hi, Z), Z j⟩|2 + |⟨λ(Ai, Z), Z j⟩|2� +, +where α(Z) = 2 � +ij |⟨λ(Yi, Y j), Z⟩|2 ≥ 0, {Yi} = {Hi} ∪ {Ai} ∪ {Vi}. This implies +0 > +� +k +⟨MλZk, Zk⟩ = +� +k +α(Zk) ≥ 0, +which is a contradiction. So zλ = 0, and consequently, nλ = λ(s, s) = 0. +Suppose that s � 0. Let {Hi}, {Ai} be an orthonormal basis of h, s, respectively. For any 0 � A ∈ s, we +have +0 > ⟨MλA, A⟩ =2 +� +ij +� +|⟨λ(Hi, A j), A⟩|2 + |⟨λ(Ai, H j), A⟩|2� ++ β(A) +− 2 +� +ij +|⟨λ(A, Hi), A j⟩|2 − 2 +� +ij +|⟨λ(Hi, A), A j⟩|2 +where β(A) = 2 � +ij |⟨λ(Hi, H j), A⟩|2 ≥ 0. This implies +0 > +� +k +⟨MλAk, Ak⟩ = +� +k +β(Ak) ≥ 0, +which is a contradiction. So s = 0. Therefore λ is a semisimple Lie algebra. 
+□ + +THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS +13 +Remark 4.7. By the proof of Theorem 4.6, we know that if [µ] ∈ Ln for which there exists [λ] ∈ +GL(n).[µ] such that Mλ is negative definite, then µ is a semisimple Lie algebra. +We say that an algebra λ degenerates to µ, write as λ → µ if µ ∈ GL(n).λ, the closure of GL(n).λ with +respect to the usual topology of Vn. The degeneration λ → µ is called direct degeneration if there are no +nontrivial chains: λ → ν → µ. The degeneration level of an algebra is the maximum length of chain of +direct degenerations. +Theorem 4.8 ([9]). An n-dimensional Leibniz algebra is of degeneration level one if and only if it is +isomorphic to one of the following +(1) µhy is a Lie algebra: µhy(X1, Xi) = Xi, i = 2, · · · , n; +(2) µhe is a Lie algebra: µhe(X1, X2) = X3; +(3) µsy is a symmetric Leibniz algebra: µsy(X1, X1) = X2; +where {X1, · · · , Xn} is a basis. +The following theorem shows that in the frame of Leibniz algebras, the maximum value of Fn : Ln → +R is attained at symmetric Leibniz algebras that are non-Lie. +Theorem 4.9. The functional Fn : Ln → R attains its maximal value at a point [µ] ∈ Ln, n ≥ 2 if and +only if µ is isomorphic to the symmetric Leibniz algebra µsy. In such a case, Fn([µ]) = 20. +Proof. Assume that Fn : Ln → R attains its maximal value at a point [µ] ∈ Ln, n ≥ 2. By Theorem 3.3, +we know that [µ] is also a critical of Fn : PVn → R. Then it follows Theorem 3.4 that Fn|GL(n).[µ] also +attains its minimum value at a point [µ] , consequently Fn|GL.[µ] is a constant, so +GL(n).[µ] = U(n).[µ] +(4.6) +The relation (4.6) implies that the only non-trivial degeneration of µ is 0 ([13, Theorem 5.1]), conse- +quently the degeneration level of µ is 1. +It is easy to see that the critical point [µhy] is of type (0 < 1; 1, n − 1), [µhe] is of type (2 < 3 < +4; 2, n − 3, 1) and [µsy] is of type (3 < 5 < 6; 1, n − 2, 1). By Proppsition 4.4, we know +Fn([µhy]) = 4, +Fn([µhe]) = 12, +Fn([µsy]) = 20. +So the theorem is proved. +□ +4.3. The structure for the critical points of Fn : S n → R. Note that the maxima and minima of +the functional Fn : Ln → R are actually attained at symmetric Leibniz algebras. In the following, we +characterize the structure for the critical points of Fn : S n → R by virtue of the nonnegative property +(see Theorem 4.1). + +14 +ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG +Theorem 4.10. Let [µ] ∈ S n be a critical point of Fn : S n → R with Mµ = cµI + Dµ of type (0 < k2 < +· · · < kr; d1, d2, · · · , dr) and consider +l = l0 ⊕ l+ +(4.7) +the direct sum of eigenspaces of Dµ with eigenvalues equal to zero, and larger than zero, respectively. +Then the following conditions hold: +(i) (Lµ +A)∗, (Rµ +A)∗ ∈ Der(µ) for any A ∈ l0. +(ii) l0 is a reductive Lie subalgebra. +(iii) l+ is the nilradical of µ, and it corresponds to a critical point of type (k2 < · · · < kr; d2, · · · , dr) +for the functional Fm : S m → R, where m = dim l+. +Proof. For (i), since Dµ, Lµ +A and Rµ +A are derivations of µ, we have +[Dµ, Lµ +A] = Lµ +DµA = 0, +[Dµ, Rµ +A] = Rµ +DµA = 0, +for any A ∈ l0. Then it follows that +tr Mµ[Lµ +A, (Lµ +A)∗] = tr(cµI + Dµ)[Lµ +A, (Lµ +A)∗] += tr Dµ[Lµ +A, (Lµ +A)∗] += tr[Dµ, Lµ +A](Lµ +A)∗ += 0. +So (Lµ +A)∗ ∈ Der(µ) by Corollary 3.2. Similarly, we have (Rµ +A)∗ ∈ Der(µ). This proves (i). +For (ii), let l0 = h ⊕ z be the orthogonal decomposition, where h = µ(l0, l0). We claim that z is the +center of l0. 
Indeed, by the orthogonal decomposition of eigenspaces (4.7), we have +Lµ +A = +� +Lµ +A|l0 +0 +0 +Lµ +A|l+ +� +, +Rµ +A = +� +Rµ +A|l0 +0 +0 +Rµ +A|l+ +� +, +for any A ∈ l0. Since h is Der(l0)-invariant, then by (i) we know that Lµ +A|l0, Rµ +A|l0 ∈ Der(l0) are of the form +Lµ +A|l0 = +� Lµ +A|h +0 +0 +0 +� +, +Rµ +A|l0 = +� Rµ +A|h +0 +0 +0 +� +, +for any A ∈ l0. So µ(l0, z) = µ(z, l0) = 0, i.e., z lies in the center of l0. Moreover, it follows that h = µ(h, h). +Let h = ¯r ⊕ ¯s be the orthogonal decomposition, where ¯s is the radical of h. Since ¯s is Der(h)-invariant, +then by (i), we know that Lµ +H|h, Rµ +H|h ∈ Der(h) are of the form +Lµ +H|h = +� Lµ +H|¯r +0 +0 +Lµ +H|¯s +� +, +Rµ +H|h = +� Rµ +H|¯r +0 +0 +Rµ +H|¯s +� +, + +THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS +15 +for any H ∈ h. Clearly, ¯r is an ideal of h, and h = µ(h, h) = µ(¯r, ¯r) ⊕ µ(¯s, ¯s). So ¯s = µ(¯s, ¯s). Since ¯s is +solvable, we conclude that ¯s = 0. Therefore h is a semisimple Lie algebra by Theorem 2.5, and moreover +we deduce that z is the center of f. This proves (ii). +For (iii), it follows from (ii) that s := z ⊕ l+ is the radical of l. Assume that Z ∈ z belongs to the +nilradical of µ, then Lµ +Z, Rµ +Z : l → l are necessarily nilpotent derivations of l. By (i), we know that for any +Z ∈ z, the derivations (Lµ +Z)∗, (Rµ +Z)∗ vanish on l0, and in particularly, (Lµ +Z)∗Z = 0, (Rµ +Z)∗Z = 0. Hence +[(Lµ +Z)∗, Lµ +Z] = 0, +[(Rµ +Z)∗, Rµ +Z] = 0. +That is, Lµ +Z and Rµ +Z are both normal and nilpotent operators, so Lµ +Z = Rµ +Z = 0, i.e., Z lies in the center of l. +This however, contradicts Z ∈ l0. So Z = 0 and l+ is the nilradical of l. Set n := l+, and denote by µn the +corresponding element in S m, where m = dim l+. Assume that {Ai} is an orthonormal basis of l0, then by +(3.8), we have +Mµ|n = Mµn + 2 +� +i +([Lµ +Ai, (Lµ +Ai)∗] + [Rµ +Ai, (Rµ +Ai)∗])|n. +(4.8) +Using (i) and Corollary 3.2, it follows that +tr Mµn[Lµ +Ai, (Lµ +Ai)∗]|n = tr Mµn[Rµ +Ai, (Rµ +Ai)∗]|n = 0. +Since tr Mµ[Lµ +Ai, (Lµ +Ai)∗] = tr Mµ[Rµ +Ai, (Rµ +Ai)∗] = 0, by (4.8) we have +tr Mµ[Lµ +Ai, (Lµ +Ai)∗] = tr Mµ|n[Lµ +Ai, (Lµ +Ai)∗]n = 0, +tr Mµ[Rµ +Ai, (Rµ +Ai)∗] = tr Mµ|n[Rµ +Ai, (Rµ +Ai)∗]n = 0. +Put T = � +i([Lµ +Ai, (Lµ +Ai)∗] + [Rµ +Ai, (Rµ +Ai)∗])|n, then we have tr T 2 = 0. Since T is Hermitian, we conclude +that T = 0. So n = l+ corresponds to a critical point of type (k2 < · · · < kr; d2, · · · , dr) for the functional +Fm : S m → R. +□ +In fact, it follows from the proof of Theorem 4.10 that Lµ +Z, Rµ +Z are normal operators for any Z ∈ z(l0). +Next, we characterize the critical points that lie in S n in terms of those which are nilpotent. +Theorem 4.11 (Solvable extension). Assume that a is an abelian Lie algebra of dimension d1, and [λ] is +critical point of Fm : S m → R of type (k2 < · · · < kr; d2, · · · , dr) where k2 > 0. Consider the direct sum +µ = a ⋉ρ λ, +where ρ = (Lρ, Rρ), and Lρ : Cd1 × Cm → Cm, Rρ : Cm × Cd1 → Cm are bilinear mappings such that µ is +a symmetric Leibniz algebra with bracket relations given by +µ(A + X, B + Y) := Lρ +A(Y) + Rρ +B(X) + λ(X, Y) +for all A, B ∈ Cd1, X, Y ∈ Cm. Assume that the following conditions are satisfied + +16 +ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG +(i) [Dλ, Lρ +A] = 0, [Dλ, Rρ +A] = 0, ∀A ∈ Cd1. +(ii) [Lρ +A, (Lρ +A)∗] = 0, [Rρ +A, (Rρ +A)∗] = 0, ∀A ∈ Cd1; and for each 0 � A ∈ Cd1, Lρ +A or Rρ +A is not zero. 
+If we extend the Hermitian inner product on Cm by setting +⟨A, B⟩ = − 2 +cλ +(tr Lρ +A(Lρ +B)∗ + tr Rρ +A(Rρ +B)∗), A, B ∈ Cd1, +then [µ] is a solvable critical point of type (0 < k2 < · · · < kr; d1, d2, · · · , dr) for Fn : S n → R, n = d1+m. +Proof. Put n = (Cm, λ), and let {Xi} be an orthonormal basis of Cm. It follows from (ii) that (Lρ +A)∗, (Rρ +A)∗ ∈ +Der(λ) for all A ∈ Cd1. Then we have +⟨MµX, A⟩ = −2 +� +i, j +⟨µ(Xi, X), X j⟩⟨µ(Xi, A), X j⟩ − 2 +� +i, j +⟨µ(X, Xi), X j⟩⟨µ(A, Xi), X j⟩ += −2 +� +i, j +⟨λ(Xi, X), X j⟩⟨µ(Xi, A), X j⟩ − 2 +� +i, j +⟨λ(X, Xi), X j⟩⟨µ(A, Xi), X j⟩ += −2 tr(Rρ +A)∗Rλ +X − 2 tr(Lρ +A)∗Lλ +X += 0, +for any A ∈ Cd1, X ∈ Cm since λ is nilpotent and (Lρ +A)∗, (Rρ +A)∗ ∈ Der(λ). So Mµ leaves a and n invariant, +and moreover, it is not hard to see that Mµ|n = Mλ = cλI + Dλ by (3.8). On the other hand, we have +⟨MµA, B⟩ = −2 +� +i, j +⟨µ(Xi, A), X j⟩⟨µ(Xi, B), X j⟩ − 2 +� +i, j +⟨µ(A, Xi), X j⟩⟨µ(B, Xi), X j⟩ += −2(tr Lρ +A(Lρ +B)∗ + tr Rρ +A(Rρ +B)∗) += cλ⟨A, B⟩, +for any A, B ∈ Cd1. So Mµ = cµI + Dµ, where cµ = cλ and +Dµ = +� 0 +0 +0 +Dλ +� +∈ Der(µ). +This completes the proof. +□ +Theorem 4.12 (General extension). Assume that f = h ⊕ z is a reductive Lie algebra of dimension d1, +and [λ] is critical point of Fm : S m → R of type (k2 < · · · < kr; d2, · · · , dr) where k2 > 0. Consider the +direct sum +µ = f ⋉ρ λ, +where ρ = (Lρ, Rρ), and Lρ : Cd1 × Cm → Cm, Rρ : Cm × Cd1 → Cm are bilinear mappings such that µ is +a symmetric Leibniz algebra with bracket relations given by +µ(A + X, B + Y) := adf A(B) + Lρ +A(Y) + Rρ +B(X) + λ(X, Y) +for all A, B ∈ Cd1, X, Y ∈ Cm. Assume that the following conditions are satisfied + +THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS +17 +(i) [Dλ, Lρ +A] = 0, [Dλ, Rρ +A] = 0, ∀A ∈ Cd1. +(ii) [Lρ +Z, (Lρ +Z)∗] = 0, [Rρ +Z, (Rρ +Z)∗] = 0, ∀Z ∈ z; and for each 0 � Z ∈ z, Lρ +Z or Rρ +Z is not zero. +Let ⟨·, ·⟩1 be a Hermitian inner product on f and {Hi | Hi ∈ h} ∪ {Zi |Zi ∈ z} be an orthonormal basis +of (f, ⟨·, ·⟩1) such that (adf Hi)∗1 = − adf Hi, (Lρ +Hi)∗ = −Lρ +Hi, (Rρ +Hi)∗ = −Rρ +Hi for all i. If we extend the +Hermitian inner product on Cm by setting +⟨A, B⟩ = − 2 +cλ +(tr adf A(adf B)∗1 + tr Lρ +A(Lρ +B)∗ + tr Rρ +A(Rρ +B)∗), A, B ∈ Cd1, +then [µ] is a critical point of type (0 < k2 < · · · < kr; d1, d2, · · · , dr) for Fn : S n → R, n = d1 + m. +Proof. Put n = (Cm, λ), and let {Ai} = {Hi, Zi} be the orthonormal basis of (Cd1, ⟨·, ·⟩1) as in hypothesis, +and {Xi} be an orthonormal basis of Cm. Then for any A ∈ Cd1, X ∈ Cm, we have +⟨MµX, A⟩ = −2 +� +i, j +⟨µ(Xi, X), X j⟩⟨µ(Xi, A), X j⟩ − 2 +� +i, j +⟨µ(X, Xi), X j⟩⟨µ(A, Xi), X j⟩ += −2 +� +i, j +⟨λ(Xi, X), X j⟩⟨µ(Xi, A), X j⟩ − 2 +� +i, j +⟨λ(X, Xi), X j⟩⟨µ(A, Xi), X j⟩ += −2 tr(Rρ +A)∗Rλ +X − 2 tr(Lρ +A)∗Lλ +X += 0, +since λ is nilpotent and (Lρ +A)∗, (Rρ +A)∗ ∈ Der(λ). So Mµ leaves f and n invariant, and it is not hard to see +that Mµ|n = Mλ = cλI + Dλ by (3.8). Moreover, for any A, B ∈ Cd1, we have +⟨MµA, B⟩ = 2 +� +i, j +⟨µ(Ai, A j), A⟩⟨µ(Ai, A j), B⟩ +− 2 +� +i, j +⟨µ(Ai, A), A j⟩⟨µ(Ai, B), A j⟩ − 2 +� +i, j +⟨µ(Xi, A), X j⟩⟨µ(Xi, X), X j⟩ +− 2 +� +i, j +⟨µ(A, Ai), A j⟩⟨µ(B, Ai), A j⟩ − 2 +� +i, j +⟨µ(A, Xi), X j⟩⟨µ(X, Xi), X j⟩ += −2(tr adf A(adf B)∗1 + tr Lρ +A(Lρ +B)∗ + tr Rρ +A(Rρ +B)∗) += cλ⟨A, B⟩. +So Mµ = cµI + Dµ, where cµ = cλ, and +Dµ = +� 0 +0 +0 +Dλ +� +∈ Der(µ). +This completes the proof. +□ +5. Examples +In this section, we classify the critical points of the functional Fn : S n → R for n = 2 and 3, +respectively. 
We show that every two-dimensional symmetric Leibniz algebra is isomorphic to a critical + +18 +ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG +point of F2; and there exist three-dimensional symmetric Leibniz algebras which are not isomorphic to +any critical point of F3. +5.1. Two-dimensional case. Note that there are only two non-abelian two-dimensional symmetric Leib- +niz algebras up to isomorphism, which is defined by +Lie: [e1, e2] = e2; +non-Lie: [e1, e1] = e2. +It is easy to see that the Lie algebra is a critical point of F2 with type (0 < 1; 1, 1), and the critical value is +4; The non-Lie symmetric Leibniz algebra is a critical point of F2 with type (1 < 2; 1, 1), and the critical +value is 20. +5.2. Three-dimensional case. The classification of 3-dimensional Leibniz algebras over C can be found +in [1, 6]. We classify the critical points of the functional F3 : S 3 → R as follows +TABLE I. non-zero 3-dimensional symmetric Leibniz algebras, critical types and critical values. +g +Type +Multiplication table +Critical type +Critical value +L1 +Lie +� +[e1, e2] = e3 +(1 < 2; 2, 1) +12 +L2 +Lie +� +[e1, e2] = e2 +(0 < 1; 1, 2) +4 +L3(α), α � 0 +Lie +� +[e3, e1] = e1, [e3, e2] = αe2, +(0 < 1; 1, 2) +4 +L4 +Lie +� +[e3, e1] = e1 + e2, [e3, e2] = e2 +− +− +L5 +Lie +�[e3, e1] = 2e1, [e3, e2] = −2e2 +[e1, e2] = e3 +(0; 3) +4 +3 +S1 +non-Lie +� +[e3, e3] = e1 +(3 < 5 < 6; 1, 1, 1) +20 +S2 +non-Lie +� +[e2, e2] = e1, [e3, e3] = e1 +(1 < 2; 2, 1) +12 +S3(2) +non-Lie +�[e2, e2] = 2e1, [e3, e2] = e1, +[e3, e3] = e1 +− +− +S3(β), β � 2 +non-Lie +�[e2, e2] = βe1, [e3, e2] = e1, +[e3, e3] = e1 +(1 < 2; 2, 1) +12 +S4 +non-Lie +� +[e1, e3] = e1 +(0 < 1; 1, 2) +4 +S5(α), α � 0 +non-Lie +�[e1, e3] = αe1, [e2, e3] = e2, +[e3, e2] = −e2 +(0 < 1; 1, 2) +4 +S6 +non-Lie +�[e2, e3] = e2, [e3, e2] = −e2, +[e3, e3] = e1 +− +− +S7(α), α � 0 +non-Lie +� +[e1, e3] = αe1, [e2, e3] = e2 +(0 < 1; 1, 2) +4 +S8 +non-Lie +� +[e1, e3] = e1 + e2, [e3, e3] = e1 +− +− +6. Some questions +By Theorem 4.1, we know that eigenvalue types for the critical points of Fn : S n → R are neces- +sarily nonnegative. From Theorem 4.6 and Theorem 4.9, we know that the maxima and minima of the + +THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS +19 +functional Fn : Ln → R are actually attained at the symmetric Leibniz algebras. So it is natural and +interesting to ask the following questions. +Question 6.1. Do all critical points of Fn : Ln → R necessarily have nonnegative eigenvalue types? +Question 6.2. Do all critical points of Fn : Ln → R necessarily lie in S n? +Note that if Question 6.2 holds, then Question 6.1 holds . +7. Acknowledgement +This paper is partially supported by NSFC (11931009 and 12131012) and NSF of Tianjin (19JCY- +BJC30600). +8. Data Availability Statements +The author declares that all data supporting the findings of this study are available within the article +References +[1] Ayupov, Sh.A.; Omirov, B.A.: On Leibniz algebras, in: Algebra and Operator Theory (Tashkent,1997), KluwerAcad. +Publ. Dordrecht, 1998, pp.1–12. +[2] Barnes, D.W.: On Levi’s theorem for Leibniz algebras, Bull. Austral. Math. Soc. 86 (2012), no. 2, 184–185. +[3] Bloh, A.: On a generalization of the concept of Lie algebra (in Russian), Dokl. Akad. Nauk SSSR 165 (1965), 471–473; +translated into English in Soviet Math. Dokl. 6 (1965), 1450–1452. +[4] B¨ohm, C.; Lafuente, R.A.: Immortal homogeneous Ricci flows, Invent. Math. 212 (2018), no. 2, 461–529. +[5] Bonezzi, R.; Hohm, O.: Leibniz gauge theories and infinity structures, Commun. Math. Phys. 
5.2. Three-dimensional case. The classification of 3-dimensional Leibniz algebras over C can be found in [1, 6]. We classify the critical points of the functional F_3 : S_3 → R in Table I below; a dash indicates that the corresponding algebra is not isomorphic to any critical point of F_3.

TABLE I. Non-zero 3-dimensional symmetric Leibniz algebras, critical types and critical values.

g            | Type    | Multiplication table                            | Critical type        | Critical value
L1           | Lie     | [e1, e2] = e3                                   | (1 < 2; 2, 1)        | 12
L2           | Lie     | [e1, e2] = e2                                   | (0 < 1; 1, 2)        | 4
L3(α), α ≠ 0 | Lie     | [e3, e1] = e1, [e3, e2] = αe2                   | (0 < 1; 1, 2)        | 4
L4           | Lie     | [e3, e1] = e1 + e2, [e3, e2] = e2               | −                    | −
L5           | Lie     | [e3, e1] = 2e1, [e3, e2] = −2e2, [e1, e2] = e3  | (0; 3)               | 4/3
S1           | non-Lie | [e3, e3] = e1                                   | (3 < 5 < 6; 1, 1, 1) | 20
S2           | non-Lie | [e2, e2] = e1, [e3, e3] = e1                    | (1 < 2; 2, 1)        | 12
S3(2)        | non-Lie | [e2, e2] = 2e1, [e3, e2] = e1, [e3, e3] = e1    | −                    | −
S3(β), β ≠ 2 | non-Lie | [e2, e2] = βe1, [e3, e2] = e1, [e3, e3] = e1    | (1 < 2; 2, 1)        | 12
S4           | non-Lie | [e1, e3] = e1                                   | (0 < 1; 1, 2)        | 4
S5(α), α ≠ 0 | non-Lie | [e1, e3] = αe1, [e2, e3] = e2, [e3, e2] = −e2   | (0 < 1; 1, 2)        | 4
S6           | non-Lie | [e2, e3] = e2, [e3, e2] = −e2, [e3, e3] = e1    | −                    | −
S7(α), α ≠ 0 | non-Lie | [e1, e3] = αe1, [e2, e3] = e2                   | (0 < 1; 1, 2)        | 4
S8           | non-Lie | [e1, e3] = e1 + e2, [e3, e3] = e1               | −                    | −
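Selected table entries can be spot-checked with the same editorial sketch (it assumes moment_matrix/critical_data from the beginning of this section). One caveat, which is our reading and not a claim from the paper: the listed type and value refer to a critical point in the isomorphism class, and for some rows, for instance L5, the displayed basis is not itself critical, so the snippet below only covers rows whose displayed basis already satisfies the criterion of Theorem 3.3.

```python
# Run after the helper definitions above.
import numpy as np

def alg(n, entries):
    """Structure constants from 1-based entries {(i, j, k): v} meaning <mu(e_i, e_j), e_k> = v."""
    mu = np.zeros((n, n, n))
    for (i, j, k), v in entries.items():
        mu[i - 1, j - 1, k - 1] = v
    return mu

L1 = alg(3, {(1, 2, 3): 1, (2, 1, 3): -1})     # [e1, e2] = e3 (Lie, hence antisymmetric)
S1 = alg(3, {(3, 3, 1): 1})                    # [e3, e3] = e1
S2 = alg(3, {(2, 2, 1): 1, (3, 3, 1): 1})      # [e2, e2] = e1, [e3, e3] = e1

for name, mu in [("L1", L1), ("S1", S1), ("S2", S2)]:
    print(name, critical_data(mu))
# Expected: L1 -> value 12, D_mu eigenvalues (8, 8, 16)  ~ type (1 < 2; 2, 1)
#           S1 -> value 20, D_mu eigenvalues (6, 10, 12) ~ type (3 < 5 < 6; 1, 1, 1)
#           S2 -> value 12, D_mu eigenvalues (8, 8, 16)  ~ type (1 < 2; 2, 1)
```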
6. Some questions

By Theorem 4.1, the eigenvalue types of the critical points of F_n : S_n → R are necessarily nonnegative. By Theorem 4.6 and Theorem 4.9, the maxima and minima of the functional F_n : L_n → R are attained at symmetric Leibniz algebras. So it is natural and interesting to ask the following questions.

Question 6.1. Do all critical points of F_n : L_n → R necessarily have nonnegative eigenvalue types?

Question 6.2. Do all critical points of F_n : L_n → R necessarily lie in S_n?

Note that an affirmative answer to Question 6.2 would imply an affirmative answer to Question 6.1.

7. Acknowledgement

This paper is partially supported by NSFC (11931009 and 12131012) and NSF of Tianjin (19JCYBJC30600).

8. Data Availability Statement

The authors declare that all data supporting the findings of this study are available within the article.

References

[1] Ayupov, Sh. A.; Omirov, B. A.: On Leibniz algebras, in: Algebra and Operator Theory (Tashkent, 1997), Kluwer Acad. Publ., Dordrecht, 1998, pp. 1–12.
[2] Barnes, D. W.: On Levi's theorem for Leibniz algebras, Bull. Austral. Math. Soc. 86 (2012), no. 2, 184–185.
[3] Bloh, A.: On a generalization of the concept of Lie algebra (in Russian), Dokl. Akad. Nauk SSSR 165 (1965), 471–473; English translation in Soviet Math. Dokl. 6 (1965), 1450–1452.
[4] Böhm, C.; Lafuente, R. A.: Immortal homogeneous Ricci flows, Invent. Math. 212 (2018), no. 2, 461–529.
[5] Bonezzi, R.; Hohm, O.: Leibniz gauge theories and infinity structures, Commun. Math. Phys. 377 (2020), 2027–2077.
[6] Casas, J. M.; Insua, M. A.; Ladra, M.; Ladra, S.: An algorithm for the classification of 3-dimensional complex Leibniz algebras, Linear Algebra Appl. 436 (2012), no. 9, 3747–3756.
[7] Feldvoss, J.: Leibniz algebras as nonassociative algebras, in: Vojtechovsky, P.; Bremner, M. R.; Carter, J. S.; Evans, A. B.; Huerta, J.; Kinyon, M. K.; Moorhouse, G. E.; Smith, J. D. H. (eds.), Nonassociative Mathematics and its Applications, Vol. 721, American Mathematical Society, Providence, RI, 2019, pp. 115–149.
[8] Hohm, O.; Samtleben, H.: Leibniz-Chern-Simons theory and phases of exceptional field theory, Commun. Math. Phys. 369 (2019), 1055–1089.
[9] Khudoyberdiyev, A.; Omirov, B.: The classification of algebras of level one, Linear Algebra Appl. 439 (2013), no. 11, 3460–3463.
[10] Kirwan, F.: Momentum maps and reduction in algebraic geometry, Differ. Geom. Appl. 9 (1998), 135–172.
[11] Kotov, A.; Strobl, T.: The embedding tensor, Leibniz-Loday algebras, and their higher gauge theories, Commun. Math. Phys. 376 (2020), 235–258.
[12] Lauret, J.: On the moment map for the variety of Lie algebras, J. Funct. Anal. 202 (2003), 392–423.
[13] Lauret, J.: Degenerations of Lie algebras and geometry of Lie groups, Differ. Geom. Appl. 18 (2003), no. 2, 177–194.
[14] Lauret, J.: Einstein solvmanifolds are standard, Ann. Math. 172 (2010), 1859–1877.
[15] Lauret, J.: Ricci soliton solvmanifolds, J. Reine Angew. Math. 650 (2011), 1–21.
[16] Lavau, S.: Tensor hierarchies and Leibniz algebras, J. Geom. Phys. 144 (2019), 147–189.
[17] Lavau, S.; Palmkvist, J.: Infinity-enhancing Leibniz algebras, Lett. Math. Phys. 110 (2020), 3121–3152.
[18] Loday, J.-L.: Une version non commutative des algèbres de Lie: les algèbres de Leibniz [A noncommutative version of Lie algebras: the Leibniz algebras], Enseign. Math. (2) 39 (1993), no. 3-4, 269–293.
[19] Loday, J.-L.; Pirashvili, T.: Universal enveloping algebras of Leibniz algebras and (co)homology, Math. Ann. 296 (1993), no. 1, 139–158.
[20] Mason, G.; Yamskulna, G.: Leibniz algebras and Lie algebras, SIGMA Symmetry Integrability Geom. Methods Appl. 9 (2013), Paper 063, 10 pp.
[21] Ness, L.: A stratification of the null cone via the moment map (with an appendix by D. Mumford), Amer. J. Math. 106 (1984), 1281–1329.
[22] Sheng, Y.; Tang, R.; Zhu, C.: The controlling L∞-algebra, cohomology and homotopy of embedding tensors and Lie-Leibniz triples, Commun. Math. Phys. 386 (2021), 269–304.
[23] Strobl, T.: Leibniz-Yang-Mills gauge theories and the 2-Higgs mechanism, Phys. Rev. D 99 (2019), 115026.
[24] Strobl, T.; Wagemann, F.: Enhanced Leibniz algebras: structure theorem and induced Lie 2-algebra, Commun. Math. Phys. 376 (2020), 51–79.
[25] Towers, D. A.: On the nilradical of a Leibniz algebra, Commun. Algebra 49 (2021), no. 10, 4345–4347.
[26] Zhang, H.; Chen, Z.; Li, L.: The moment map for the variety of 3-Lie algebras, to appear in J. Funct. Anal., 2022.

(Zhiqi Chen) School of Mathematics and Statistics, Guangdong University of Technology, Guangzhou 510520, P.R. China
Email address: chenzhiqi@nankai.edu.cn

(Saiyu Wang) School of Mathematical Sciences and LPMC, Nankai University, Tianjin 300071, P.R. China
Email address: 2120200040@mail.nankai.edu.cn

(Hui Zhang) School of Mathematics, Southeast University, Nanjing 210096, P.R. China
Email address: 2120160023@mail.nankai.edu.cn
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Motivated by the idea, the study has recently been extended to the variety of 3-Lie algebras in [26].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Here, a 3-Lie algebra is a natural generalization of the concept of a Lie algebra to the case where the fundamental multiplication operation is 3-ary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' See [26] for more details about the moment map for the variety of 3-Lie algebras.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In this article, we study the moment map for the variety of Leibniz algebras, which are nonanticom- mutative versions of Lie algebras.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' A Leibniz algebra is a vector space with a multiplication such that every left multiplication operator is a derivation, which was at first introduced by Bloh ([3]) and later independently rediscovered by Loday in the study of cohomology theory (see [18, 19]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Leibniz algebras play an important role in different areas of mathematics and physics [5, 8, 11, 16, 17, 22, 23, 24], and we refer to [7] for a nice survey of Leibniz algebras.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' For the moment map in the frame of Leibniz algebras, it is defined as follows: Let GL(n) be the complex reductive Lie group acting naturally on the complex vector space Vn = ⊗2(Cn)∗ ⊗ Cn, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=', the space of all n-dimensional complex algebras.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The usual Hermitian inner product on Cn induces an U(n)- invariant Hermitian inner product on Vn, which is denoted by ⟨·, ·⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Since gl(n) = u(n) + iu(n), we may 2010 Mathematics Subject Classification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 14L30, 17B30, 53D20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Key words and phrases.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Moment map;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Variety of Leibniz algebras;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Critical point.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 1 2 ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG define a function as follows m : PVn → iu(n), (m([µ]), A) = (dρµ)eA ∥µ∥2 , 0 � µ ∈ Vn, A ∈ iu(n), where (·, ·) is an Ad(U(n))-invariant real inner product on iu(n), and ρµ : GL(n) → R is defined by ρµ(g) = ⟨g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ, g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The function m is the moment map from symplectic geometry, corresponding to the Hamiltonian action U(n) of Vn on the symplectic manifold PVn (see [10, 21]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In this article, we shall study the critical points of the functional Fn = ∥m∥2 : PVn → R, and emphasize those critical points that lie in Ln and S n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Here, Ln, S n denote the projectivizations of the algebraic varieties of all n-dimensional Leibniz algebras, and all n-dimensional symmetric Leibniz algebras, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The article is organized as follows: In Section 2, we recall some fundamental results of Leibniz algebras (Def.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1) and symmetric Leibniz algebras (Def.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In Section 3, we first give the explicit expression of the moment map m : PVn → iu(n) in terms of Mµ, in fact m([µ]) = Mµ ∥µ∥2, [µ] ∈ PVn (Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Then we show that [µ] ∈ PVn is a critical point of Fn = ∥m∥2 : PVn → R if and only if Mµ = cµI + Dµ for some cµ ∈ R and Dµ ∈ Der(µ) (Thm.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In Section 4, we prove that there exists a constant c > 0 such that the eigenvalues of cDµ are integers for any critical point [µ] ∈ PVn, and if moreover [µ] ∈ S n, we show that the eigenvalues are necessarily nonnegative (Thm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1), which generalizes the nonnegative rationality from Lie algebras to symmetric Leibniz algerbas (see [12, Thm 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='5]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Besides, we give a description of the extremal points of Fn : Ln → R, proving that the minimum value is attained at semisimple Lie algebras (Thm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='6), while the maximum value is attained at the direct sum of the two-dimensional non-Lie symmetric Leibniz algebra with the abelian algebra (Thm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='9).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Finally, for an arbitrary critical point [µ] of Fn : S n → R, we characterize the structure of [µ] by virtue of the nonnegative rationality of Dµ (Thm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='10–Thm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='12).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In Section 5, we classify the critical points of Fn : S n → R with n = 2, 3, which shows that there exist many critical points that are not Lie algebras.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Moreover, we prove that every 2-dimensional symmetric Leibniz algebra is isomorphic to a critical point of F2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' and there exist 3-dimensional symmetric Leibniz algebras which are not isomorphic to any critical point of F3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Finally in Section 6, we collect some natural questions concerning the critical points of Fn : Ln → R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Preliminaries In this section, we recall some basic definitions and results of Leibniz algebras .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The ambient field is always assumed to be the complex number field C unless otherwise stated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS 3 Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1 ([7, 18]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' A vector space L over C with a bilinear operation L × L → L, denoted by (x, y) �→ xy, is called a Leibniz algebra, if every left multiplication is a derivation, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=', x(yz) = (xy)z + y(xz) (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1) for all x, y, z ∈ L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Remark 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Leibniz algebras are sometimes called left Leibniz algebras in the literature, and there is a corresponding notion of right Leibniz algebra, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=', an algebra with the property that every right multiplication is a derivation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In some studies, the authors prefer to call a right Leibniz algebra a Leibniz algebra.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' We point out that for our purpose, it actually does not matter which notion is used since the opposite algebra of a left Leibniz algebra is a right Leibniz algebra and vice versa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Following Mason and Yamskulna [20], we introduce the notion of the symmetric Leibniz algebra as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='3 ([20]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' An algebra is called a symmetric Leibniz algebra if it is at the same time a left and a right Leibniz algebra, that is x(yz) = (xy)z + y(xz), (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='2) (xy)z = (xz)y + x(yz), (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='3) for all x, y, z ∈ L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Every Lie algebra is clearly a symmetric Leibniz algebra, and the converse is not true.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In the following, we make the convention that an ideal of a Leibniz algebra always means a two-side ideal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Let L be a Leibniz algebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' L is called solvable if L(r) = 0 for some r ∈ N, where L(0) = L, L(k+1) = L(k)L(k), k ≥ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' If I, J are any two solvable ideals of L, then I + J is also a solvable ideal of L, so the maximum solvable ideal is unique, called the radical of g and denoted by Rad(L) ([7]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='5 ([2]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' A Leibniz algebra L over a field of characteristic 0 admits a Levi decomposition, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=', L = S + Rad(L) decomposes into the sum of a semisimple Lie subalgebra S and the radical satisfying S ∩ Rad(L) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' A Leibniz algebra L is called nilpotent if there exists a positive integer n such that any product of n elements in L, no matter how associated, is zero.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 4 ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG For a Leibniz algebra, we define 1L := L, k+1L := L(kL), k ≥ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Furthermore, we define L1 := L, Lk = k−1 � i=1 LiLk−i, k ≥ 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Then we have the following theorem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='7 ([7]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' For any integer k ≥ 1, then kL = Lk.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Moreover, L is nilpotent if and only if there exists an positive integer n such that Ln = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' If I, J are two nilpotent ideals of a Leibniz algebra L, then I + J is also a nilpotent ideal of L, consequently the maximum nilpotent ideal is unique, called the nilradical, denoted by N(L) ([7, 25]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='8 ([25]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Let L be a Leibniz algebra over a field of characteristic zero, then LRad(L), Rad(L)L ⊂ N(L).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The moment map for complex algebras Let Cn be the n-dimensional complex vector space and Vn = ⊗2(Cn)∗ ⊗ Cn be the space of all complex n-dimensional algebras.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The natural action of GL(n) = GL(Cn) on Vn is given by g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ(X, Y) = gµ(g−1X, g−1Y), g ∈ GL(n), X, Y ∈ Cn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1) Clearly, GL(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ is precisely the isomorphism class of µ, and 0 lies in the boundary of GL(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ for any µ ∈ Vn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' By differentiating (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1), we obtain the natural action gl(n) on Vn, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=', A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ(X, Y) = Aµ(X, Y) − µ(AX, Y) − µ(X, AY), A ∈ gl(n), µ ∈ Vn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='2) It follows that A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ = 0 if and only if A ∈ Der(µ), the derivation algebra of µ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The usual Hermitian inner product on Cn gives an U(n)-invariant Hermitian inner product on Vn as follows ⟨µ, λ⟩ = � i, j,k ⟨µ(Xi, X j), Xk⟩⟨λ(Xi, X j), Xk⟩, µ, λ ∈ Vn, (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='3) where {X1, X2, · · · , Xn} is an arbitrary orthonormal basis of Cn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' It is easy to see that gl(n) = u(n) + iu(n) decomposes into skew-Hermitian and Hermitian transformations of Vn, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Moreover, there is an Ad(U(n))-invariant Hermitian inner product on gl(n) given by (A, B) = tr AB∗, A, B ∈ gl(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='4) The moment map from symplectic geometry, corresponding to the Hamiltonian action of U(n) on the symplectic manifold PVn is defined as follows m : PVn → iu(n), (m([µ]), A) = (dρµ)eA ∥µ∥2 , 0 � µ ∈ Vn, A ∈ iu(n), (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='5) THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS 5 where ρµ : GL(n) → R is given by ρµ(g) = ⟨g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ, g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Clearly, (dρµ)eA = 2⟨A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ, µ⟩ for A ∈ iu(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The square norm of the moment map is denoted by Fn : PVn → R, Fn([µ]) = ∥m([µ])∥2 = (m([µ]), m([µ])), (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='6) In order to express m([µ]) explicitly, we define Mµ ∈ iu(n) as follows Mµ = 2 � i Lµ Xi(Lµ Xi)∗ − 2 � i (Lµ Xi)∗Lµ Xi − 2 � i (Rµ Xi)∗Rµ Xi, (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='7) where the left and right multiplication Lµ X, Rµ X : Cn → Cn by X of the algebra µ, are given by Lµ X(Y) = µ(X, Y) and Rµ X(Y) = µ(Y, X) for all Y ∈ Cn, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' It is not hard to prove that ⟨MµX, Y⟩ =2 � i, j ⟨µ(Xi, X j), X⟩⟨µ(Xi, X j), Y⟩ − 2 � i, j ⟨µ(Xi, X), X j⟩⟨µ(Xi, Y), X j⟩ − 2 � i, j ⟨µ(X, Xi), X j⟩⟨µ(Y, Xi), X j⟩ (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='8) for X, Y ∈ Cn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Note that if the algebra µ is commutative or anticommutative, then the second and third term of (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='8) are the same, and in this case, Mµ coincides with [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' For any 0 � µ ∈ Vn, we have m([µ]) = Mµ ∥µ∥2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In particular, (Mµ, A) = 2⟨A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ, µ⟩ for any A ∈ iu(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' For any A ∈ iu(n), we have (Mµ, A) = tr MµA∗ = tr MµA and tr MµA = 2 tr � i Lµ Xi(Lµ Xi)∗A − 2 tr � i ((Lµ Xi)∗Lµ Xi + (Rµ Xi)∗Rµ Xi)A =: I + II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Note that I =2 � i tr Lµ Xi(Lµ Xi)∗A =2 � i tr(Lµ Xi)∗ALµ Xi =2 � i, j ⟨(Lµ Xi)∗ALµ Xi(X j), X j⟩ =2 � i, j ⟨Aµ(Xi, X j), µ(Xi, X j)⟩, 6 ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG and II = − 2 � i, j ⟨((Lµ Xi)∗Lµ Xi + (Rµ Xi)∗Rµ Xi)AX j, X j⟩ = − 2 � i, j ⟨µ(Xi, AX j), µ(Xi, X j)⟩ − 2 � i, j ⟨µ(AX j, Xi), µ(X j, Xi)⟩ = − 2 � i, j ⟨µ(AXi, X j) + µ(Xi, AX j), µ(Xi, X j)⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' By (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='2), it follows that (Mµ, A) = 2⟨A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ, µ⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Since A ∈ iu(n), we have ⟨A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ, µ⟩ = ⟨µ, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The Lemma is completed by (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' □ Corollary 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' For any µ ∈ Vn, then (i) tr MµD = 0 for any D ∈ Der(µ) ∩ iu(n);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (ii) tr Mµ[A, A∗] ≥ 0 for any A ∈ Der(µ), and equality holds if and only if A∗ ∈ Der(µ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' For (i), it follows from Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1 and the fact that D is a Hermitian derivation of µ.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' For (ii), it follows from that tr Mµ[A, A∗] = 2⟨A∗.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ, A∗.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ⟩ ≥ 0 for any A ∈ Der(µ), and the fact A∗.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ = 0 if and only if A∗ ∈ Der(µ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' □ Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The moment map m : PVn → iu(n), the functional square norm of the moment map Fn = ∥m∥2 : PVn → R and the gradient of Fn are, respectively, given by Fn([µ]) = tr M2 µ ∥µ∥4 , grad(Fn)[µ] = 8π∗(Mµ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ ∥µ∥4 , [µ] ∈ PVn, (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='9) where π∗ denotes the derivative of π : Vn\\{0} → PVn, the canonical projection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Moreover, the following statements are equivalent: (i) [µ] ∈ PVn is a critical point of Fn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (ii) [µ] ∈ PVn is a critical point of Fn|GL(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' [µ].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (iii) Mµ = cµI + Dµ for some cµ ∈ R and Dµ ∈ Der(µ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' By (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='6) and Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1, we have Fn([µ]) = tr M2 µ ∥µ∥4 for any [µ] ∈ PVn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' To prove the second one, we only need to compute the gradient of Fn : Vn \\ {0} → R, Fn(µ) = tr M2 µ ∥µ∥4 , and then to project it via π∗.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' If µ, λ ∈ Vn with µ � 0, then Re⟨grad(Fn)µ, λ⟩ = d d �����t=0 Fn(µ + tλ) = d d �����t=0 1 ∥µ + tλ∥4 (Mµ+tλ, Mµ+tλ) = − 4 Re⟨Fn(µ) ∥µ∥2 µ, λ⟩ + 2 ∥µ∥4 ( d d �����t=0 Mµ+tλ, Mµ) THE MOMENT MAP FOR THE VARIETY OF LEIBNIZ ALGEBRAS 7 We claim that ( d d ���t=0 Mµ+tλ, A) = 4 Re⟨A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ, λ⟩ for any A ∈ iu(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Indeed, by Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1, we have ( d d �����t=0 Mµ+tλ, A) = d d �����t=0 (Mµ+tλ, A) = 2 d d �����t=0 ⟨A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (µ + tλ), µ + tλ⟩ = 2⟨A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='λ, µ⟩ + 2⟨A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ, λ⟩ = 4 Re⟨A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ, λ⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The claim is therefore proved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' It follows that grad(Fn)µ = −4 Fn(µ) ∥µ∥2 µ + 8(Mµ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ ∥µ∥4 , and consequentely grad(Fn)[µ] = 8π∗(Mµ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ ∥µ∥4 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' So the first part of the theorem is proved, and the following is to prove the equivalence among the statements (i), (ii) and (iii).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (i) ⇔ (ii) : The equivalence follows from that grad(Fn) is tangent to the GL(n)-orbits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Indeed grad(Fn)[µ] = 8π∗(Mµ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ ∥µ∥4 = 8 ∥µ∥4 π∗( d d �����t=0 etMµ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ) = 8 ∥µ∥4 d d �����t=0 etMµ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' [µ] ∈ T[µ](GL(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' [µ]).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (iii) ⇒ (i) : By (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='2), we know that I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ = −µ, and (Mµ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ = (cµI + Dµ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ = −cµµ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' It follows that grad(Fn)[µ] = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (i) ⇒ (iii) : Since grad(Fn)[µ] = 0, then (Mµ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ ∈ ker π∗µ = Cµ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' So Mµ = cI + D for some c ∈ C and D ∈ Der(µ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Clearly [D, D∗] = 0, we conclude by Corollary 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='2 that D∗ is also a derivation of µ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In particular, (c − ¯c)I = D∗ − D ∈ Der(µ), thus c = ¯c ∈ R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' □ In the frame of algebras, a result due to Ness can be stated as follows Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='4 ([21]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' If [µ] is a critical point of the functional Fn : PVn �→ R then (i) Fn|GL(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' [µ] attains its minimum value at [µ].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (ii) [λ] ∈ GL(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' [µ] is a critical point of Fn if and only if [λ] ∈ U(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' [µ].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Let [µ] ∈ PVn be a critical point of Fn with Mµ = cµI+Dµ for some cµ ∈ R and Dµ ∈ Der(µ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Then we have (i) cµ = tr M2 µ tr Mµ = − 1 2 tr M2 µ ∥µ∥2 < 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' (ii) If tr Dµ � 0, then cµ = − tr D2 µ tr Dµ and tr Dµ > 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Since Mµ = cµI + Dµ, by Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1 and Corollary 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='2 we have tr Mµ = (Mµ, I) = 2⟨µ, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='µ⟩ = −2∥µ∥2 < 0, tr M2 µ = tr Mµ(cµI + Dµ) = cµ tr Mµ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' So cµ = tr M2 µ tr Mµ = − 1 2 tr M2 µ ∥µ∥2 < 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' If tr Dµ � 0, then 0 = tr MµDµ = cµ tr Dµ + tr D2 µ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' So cµ = − tr D2 µ tr Dµ and tr Dµ > 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' □ 8 ZHIQI CHEN, SAIYU WANG, AND HUI ZHANG Remark 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In fact, tr Dµ = 0 if and only if Dµ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Indeed, it follows from that 0 = cµ tr Dµ + tr D2 µ and Dµ is hermitian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The critical points of the variety of Leibniz algebras The spaces Ln, Sn of all n-dimensional Leibniz algebras and symmetric Leibniz algebras are alge- braic sets since they are given by polynomial conditions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Denote by Ln and S n the projective algebraic varieties obtained by projectivization of Ln and Sn, respectively.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Then by Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='3, we know that the critical points of Fn : Ln → R, and Fn : S n → R are precisely the critical points of Fn : PVn → R which lie in Ln and S n, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The rationality and nonnegative property.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The following rationality and nonnegative property are generalizations of [12] from Lie algebras to Leibniz algebras and symmetric Leibniz algebras, re- spectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Let [µ] ∈ PVn be a critical point of Fn : PVn → R with Mµ = cµI + Dµ for some cµ ∈ R and Dµ ∈ Der(µ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Then there exists a constant c > 0 such that the eigenvalues of cDµ are integers prime to each other, say k1 < k2 < · · · < kr ∈ Z with multiplicities d1, d2, · · · , dr ∈ N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' If moreover [µ] ∈ S n, then the integers are nonnegative.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' The case Dµ = 0 is trivial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' In the following, we assume that Dµ is nonzero.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Note that Dµ is Hermitian, then we have the following orthogonal decomposition Cn = l1 ⊕ l2 ⊕ · · · ⊕ lr, r ≥ 2 where li := {X ∈ Cn|DµX = ciX} are the eigenspaces of Dµ corresponding to the eigenvalues c1 < c2 < · · < cr ∈ R, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Set di = dim li ∈ N, 1 ≤ i ≤ r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FPT4oBgHgl3EQf7DWb/content/2301.13203v1.pdf'} +page_content=' Since Dµ is a derivation, we have the following bracket relations µ(li, lj) ⊂ lk if ci + cj = ck, for all 1 ≤ i, j, k ≤ r.' 
Conversely, if we define a linear transformation A : C^n → C^n by A|_{l_i} = a_i Id_{l_i}, where a_1, a_2, ···, a_r ∈ R satisfy a_i + a_j = a_k for all i, j, k such that c_i + c_j = c_k, then A is a Hermitian derivation of µ. Clearly, all such derivations form a real vector space, which can be identified with

W := {(w_1, w_2, ···, w_r) ∈ R^r | w_i + w_j = w_k if c_i + c_j = c_k, 1 ≤ i, j, k ≤ r}.

We endow R^r with the usual inner product, i.e.,

⟨x, y⟩ = Σ_i x_i y_i,   (4.1)

for any x = (x_1, x_2, ···, x_r), y = (y_1, y_2, ···, y_r) ∈ R^r.

For any derivation A ∈ W, by Corollary 3.2 and Lemma 3.5, we have

0 = tr M_µ A = tr(c_µ I + D_µ)A = tr(D_µ − αI)A,

where α = tr D_µ^2 / tr D_µ = (c_1^2 d_1 + c_2^2 d_2 + ··· + c_r^2 d_r)/(c_1 d_1 + c_2 d_2 + ··· + c_r d_r) > 0. Then we see that (d_1(c_1 − α), d_2(c_2 − α), ···, d_r(c_r − α)) ⊥ W relative to (4.1). Put F := W^⊥; then by definition it is easy to see that

F = span_{1≤i,j,k≤r}{e_i + e_j − e_k : c_i + c_j = c_k},

where e_i is the vector in R^r having 1 in the i-th position and 0 elsewhere. Let {e_{i_1} + e_{j_1} − e_{k_1}, ···, e_{i_s} + e_{j_s} − e_{k_s}} be a basis of F; then

(d_1(c_1 − α), d_2(c_2 − α), ···, d_r(c_r − α)) = Σ_{p=1}^{s} b_p (e_{i_p} + e_{j_p} − e_{k_p}),   (4.2)

for some b_1, b_2, ···, b_s ∈ R. Put E equal to the matrix whose rows are e_{i_1} + e_{j_1} − e_{k_1}, e_{i_2} + e_{j_2} − e_{k_2}, ···, e_{i_s} + e_{j_s} − e_{k_s}, so that E ∈ Z^{s×r}; then E E^T is an invertible integer matrix and (E E^T)^{−1} ∈ GL(s, Q). By (4.2) and the definition of E, we have

(d_1(c_1 − α), d_2(c_2 − α), ···, d_r(c_r − α))^T = E^T (b_1, b_2, ···, b_s)^T,
E (c_1, c_2, ···, c_r)^T = (0, 0, ···, 0)^T,
E (1, 1, ···, 1)^T = (1, 1, ···, 1)^T,

where the first all-ones vector has r entries and the second has s entries. Since (4.2) reads D (c_1 − α, ···, c_r − α)^T = E^T (b_1, ···, b_s)^T with D = diag(d_1, d_2, ···, d_r), left multiplication by E D^{−1} gives

(0, ···, 0)^T − α (1, ···, 1)^T = E D^{−1} E^T (b_1, b_2, ···, b_s)^T.

It is easy to see that E D^{−1} E^T ∈ GL(s, Q). Consequently

D (c_1 − α, c_2 − α, ···, c_r − α)^T = −α E^T (E D^{−1} E^T)^{−1} (1, 1, ···, 1)^T,

and

(1/α)(c_1, c_2, ···, c_r)^T = (1, 1, ···, 1)^T − D^{−1} E^T (E D^{−1} E^T)^{−1} (1, 1, ···, 1)^T ∈ Q^r.

So there exists a constant c > 0 such that the eigenvalues of cD_µ are integers prime to each other.
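The rational dependence displayed above can be checked numerically. The following Python sketch (an illustration with hypothetical eigenvalue data, not data coming from an actual critical point) builds E from the admissible triples and evaluates the right-hand side of the last identity.

import numpy as np

# Hypothetical eigenvalues c_1 < ... < c_r of D_mu and their multiplicities;
# for this toy choice the admissible rows below are linearly independent,
# so they already form a basis of F = W^perp.
c = np.array([1.0, 2.0, 3.0])
d = np.array([1.0, 1.0, 1.0])
r = len(c)

rows = set()
for i in range(r):
    for j in range(r):
        for k in range(r):
            if abs(c[i] + c[j] - c[k]) < 1e-9:   # the condition c_i + c_j = c_k
                row = np.zeros(r)
                row[i] += 1; row[j] += 1; row[k] -= 1
                rows.add(tuple(row))             # the vector e_i + e_j - e_k
E = np.array(sorted(rows))
D = np.diag(d)
ones_s = np.ones(E.shape[0])

# (1/alpha)(c_1, ..., c_r)^T = (1, ..., 1)^T - D^{-1} E^T (E D^{-1} E^T)^{-1} (1, ..., 1)^T
v = np.ones(r) - np.linalg.inv(D) @ E.T @ np.linalg.solve(E @ np.linalg.inv(D) @ E.T, ones_s)
print(v)          # [3/7, 6/7, 9/7]: a rational vector proportional to (c_1, c_2, c_3)
print(v / v[0])   # [1, 2, 3]: integers prime to each other, as in Theorem 4.1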
If moreover [µ] ∈ S_n, we claim that the integers are nonnegative. Indeed, assume that 0 ≠ X ∈ C^n satisfies D_µ X = c_1 X. Then we have

c_1 L^µ_X = [D_µ, L^µ_X],   c_1 R^µ_X = [D_µ, R^µ_X].

It follows that

c_1 tr L^µ_X (L^µ_X)^* = tr [D_µ, L^µ_X](L^µ_X)^* = tr [M_µ, L^µ_X](L^µ_X)^* = tr M_µ [L^µ_X, (L^µ_X)^*].   (4.3)

Similarly,

c_1 tr R^µ_X (R^µ_X)^* = tr M_µ [R^µ_X, (R^µ_X)^*].   (4.4)

Since L^µ_X, R^µ_X are derivations of µ, by Corollary 3.2 we have c_1 tr L^µ_X (L^µ_X)^* ≥ 0 and c_1 tr R^µ_X (R^µ_X)^* ≥ 0. If L^µ_X or R^µ_X is not zero, then c_1 ≥ 0. If L^µ_X and R^µ_X are both zero, then X lies in the center of µ, and by (3.8)

⟨M_µ X, X⟩ = 2 Σ_{i,j} |⟨µ(X_i, X_j), X⟩|^2 ≥ 0.   (4.5)

Since M_µ = c_µ I + D_µ, we get 0 ≤ ⟨M_µ X, X⟩ = (c_µ + c_1)⟨X, X⟩. It follows from Lemma 3.5 that c_1 ≥ −c_µ > 0. This completes the proof. □

Remark 4.2. Let [µ] be a critical point of F_n : S_n → R with M_µ = c_µ I + D_µ for some c_µ ∈ R and D_µ ∈ Der(µ). If µ is nilpotent, then D_µ is positive definite. Consequently, all nilpotent critical points of F_n : S_n → R are N-graded. Indeed, assume that 0 ≠ X ∈ C^n satisfies D_µ X = c_1 X, where c_1 is the smallest eigenvalue of D_µ. By Theorem 4.1, we know that c_1 ≥ 0. Suppose that c_1 = 0; then by (4.3) and (4.4), tr M_µ [L^µ_X, (L^µ_X)^*] = 0 and tr M_µ [R^µ_X, (R^µ_X)^*] = 0. Using Corollary 3.2, we see that (L^µ_X)^* and (R^µ_X)^* are derivations of µ. Let l be the symmetric Leibniz algebra (C^n, µ).
Consider the orthogonal decomposition of l,

l = n_1 ⊕ n_2 ⊕ ··· ⊕ n_p,

where p ≥ 2, µ(l, l) = n_2 ⊕ ··· ⊕ n_p, µ(l, µ(l, l)) = n_3 ⊕ ··· ⊕ n_p, and so on. Since L^µ_X and (L^µ_X)^* are derivations of µ, (L^µ_X)^* leaves each n_i invariant and L^µ_X(n_i) ⊂ n_{i+1}. So tr L^µ_X (L^µ_X)^* = 0, and L^µ_X = 0. Similarly, one concludes that R^µ_X = 0. That is, X lies in the center of l, which is a contradiction since in this case we have c_1 ≥ −c_µ > 0. So D_µ is positive definite.

4.2. The minima and maxima of F_n : L_n → R. Following [12], we introduce the notion of the type of a critical point.

Definition 4.3. The data set (k_1 < k_2 < ··· < k_r; d_1, d_2, ···, d_r) in Theorem 4.1 is called the type of the critical point [µ].

For any fixed dimension n, it follows from the finiteness of the partitions of n in the proof of Theorem 4.1 that there are only finitely many types of critical points of F_n : PV_n → R.
Proposition 4.4. Let [µ] ∈ PV_n be a critical point of F_n with type α = (k_1 < k_2 < ··· < k_r; d_1, d_2, ···, d_r). Then we have:
(i) If α = (0; n), then F_n([µ]) = 4/n.
(ii) If α ≠ (0; n), then F_n([µ]) = 4 (n − (k_1 d_1 + k_2 d_2 + ··· + k_r d_r)^2 / (k_1^2 d_1 + k_2^2 d_2 + ··· + k_r^2 d_r))^{−1}.

Proof. We suppose that M_µ = c_µ I + D_µ and ∥µ∥ = 1. Since tr M_µ = −2⟨µ, µ⟩ = −2, we have tr M_µ^2 = tr M_µ(c_µ I + D_µ) = c_µ tr M_µ = −2c_µ, and

F_n([µ]) = tr M_µ^2 / ∥µ∥^4 = tr M_µ^2 = −2c_µ.

For (i), we have D_µ = 0, so M_µ = c_µ I and c_µ n = tr M_µ = −2. Thus c_µ = −2/n and F_n([µ]) = −2c_µ = 4/n.

For (ii), we have D_µ ≠ 0, and c_µ = − tr D_µ^2 / tr D_µ. Note that

F_n([µ]) = tr M_µ^2 = tr(c_µ I + D_µ)^2 = c_µ^2 n + c_µ tr D_µ = (1/4) F_n([µ])^2 n − (1/2) F_n([µ]) tr D_µ,

so we have

1/F_n([µ]) = (1/4) n − (1/(2F_n([µ]))) tr D_µ = (1/4) n + (1/(4c_µ)) tr D_µ = (1/4)(n − (tr D_µ)^2 / tr D_µ^2).

It follows that F_n([µ]) = 4 (n − (k_1 d_1 + k_2 d_2 + ··· + k_r d_r)^2 / (k_1^2 d_1 + k_2^2 d_2 + ··· + k_r^2 d_r))^{−1}. □
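As a quick check of the formula in (ii), take the three-dimensional Lie algebra L1 of Table I below, defined by [e_1, e_2] = e_3, whose critical type is (1 < 2; 2, 1): formula (ii) gives

F_3([µ]) = 4 (3 − (1·2 + 2·1)^2/(1^2·2 + 2^2·1))^{−1} = 4 (3 − 16/6)^{−1} = 4 · 3 = 12,

which is exactly the critical value listed in Table I.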
Lemma 4.5. Assume [µ] ∈ PV_n. Then [µ] is a critical point of F_n : PV_n → R with type (0; n) if and only if F_n([µ]) = 4/n. Moreover, 4/n is the minimum value of F_n : PV_n → R.

Proof. For any 0 ≠ µ ∈ V_n, let x_1, x_2, ···, x_n ∈ R denote the eigenvalues of M_µ. Since tr M_µ = −2∥µ∥^2, we have

F_n([µ]) = tr M_µ^2 / ∥µ∥^4 = 4 tr M_µ^2 / (tr M_µ)^2 = 4 (x_1^2 + x_2^2 + ··· + x_n^2)/(x_1 + x_2 + ··· + x_n)^2.

It is easy to see that F_n([µ]) ≥ 4/n, with equality if and only if x_1 = x_2 = ··· = x_n. So [µ] is a critical point of F_n : PV_n → R with type (0; n) if and only if M_µ is a constant multiple of I, if and only if F_n attains its minimum value 4/n at [µ]. □
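For example, among the three-dimensional algebras in Table I below, the only critical point of type (0; 3) is the Lie algebra L5, with brackets [e_3, e_1] = 2e_1, [e_3, e_2] = −2e_2, [e_1, e_2] = e_3 (a copy of sl(2, C)), and its critical value 4/3 is precisely the minimum value 4/n for n = 3.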
The following theorem shows that even in the frame of Leibniz algebras, the semisimple Lie algebras are still the only critical points of F_n : L_n → R attaining the minimum value.

Theorem 4.6. Assume that there exists a semisimple Lie algebra of dimension n. Then F_n : L_n → R attains its minimum value at a point [λ] ∈ GL(n).[µ] if and only if µ is a semisimple Lie algebra. In such a case, F_n([λ]) = 4/n.

Proof. Assume that µ is a complex semisimple Lie algebra. It follows from [12, Theorem 4.3] that F_n : L_n → R attains its minimum value 4/n at a point [λ] ∈ GL(n).[µ].

Conversely, assume that F_n : L_n → R attains its minimum value at a point [λ] ∈ GL(n).[µ]. By hypothesis, there exists a semisimple Lie algebra of dimension n, so the first part of the proof and Lemma 4.5 imply that M_λ = c_λ I with c_λ < 0. To prove that µ is semisimple, it suffices to show that l = (C^n, λ) is semisimple. Consider the following orthogonal decompositions:
(i) l = h ⊕ s, where s is the radical of λ;
(ii) s = a ⊕ n_λ, where n_λ = λ(s, s) is a nilpotent ideal of l;
(iii) n_λ = v ⊕ z_λ, where z_λ = {Z ∈ n_λ : λ(Z, n_λ) = λ(n_λ, Z) = 0} is the center of n_λ.
Clearly, z_λ is an ideal of l, and we have l = h ⊕ a ⊕ v ⊕ z_λ. Suppose that z_λ ≠ 0. Let {H_i}, {A_i}, {V_i}, {Z_i} be orthonormal bases of h, a, v and z_λ, respectively, and put {X_i} = {H_i} ∪ {A_i} ∪ {V_i} ∪ {Z_i}. For any 0 ≠ Z ∈ z_λ, by hypothesis we have

0 > ⟨M_λ Z, Z⟩ = 2 Σ_{i,j} |⟨λ(X_i, X_j), Z⟩|^2 − 2 Σ_{i,j} |⟨λ(Z, X_i), X_j⟩|^2 − 2 Σ_{i,j} |⟨λ(X_i, Z), X_j⟩|^2
 = 2 Σ_{i,j} (|⟨λ(Z_i, H_j), Z⟩|^2 + |⟨λ(H_i, Z_j), Z⟩|^2 + |⟨λ(Z_i, A_j), Z⟩|^2 + |⟨λ(A_i, Z_j), Z⟩|^2) + α(Z)
  − 2 Σ_{i,j} (|⟨λ(Z, H_i), Z_j⟩|^2 + |⟨λ(Z, A_i), Z_j⟩|^2) − 2 Σ_{i,j} (|⟨λ(H_i, Z), Z_j⟩|^2 + |⟨λ(A_i, Z), Z_j⟩|^2),

where α(Z) = 2 Σ_{i,j} |⟨λ(Y_i, Y_j), Z⟩|^2 ≥ 0 and {Y_i} = {H_i} ∪ {A_i} ∪ {V_i}. This implies

0 > Σ_k ⟨M_λ Z_k, Z_k⟩ = Σ_k α(Z_k) ≥ 0,

which is a contradiction.
So z_λ = 0, and consequently n_λ = λ(s, s) = 0. Suppose that s ≠ 0. Let {H_i}, {A_i} be orthonormal bases of h and s, respectively. For any 0 ≠ A ∈ s, we have

0 > ⟨M_λ A, A⟩ = 2 Σ_{i,j} (|⟨λ(H_i, A_j), A⟩|^2 + |⟨λ(A_i, H_j), A⟩|^2) + β(A)
  − 2 Σ_{i,j} |⟨λ(A, H_i), A_j⟩|^2 − 2 Σ_{i,j} |⟨λ(H_i, A), A_j⟩|^2,

where β(A) = 2 Σ_{i,j} |⟨λ(H_i, H_j), A⟩|^2 ≥ 0. This implies

0 > Σ_k ⟨M_λ A_k, A_k⟩ = Σ_k β(A_k) ≥ 0,

which is a contradiction. So s = 0, and therefore λ is a semisimple Lie algebra. □

Remark 4.7. By the proof of Theorem 4.6, we know that if [µ] ∈ L_n is such that there exists [λ] ∈ GL(n).[µ] with M_λ negative definite, then µ is a semisimple Lie algebra.

We say that an algebra λ degenerates to µ, written λ → µ, if µ lies in the closure of the orbit GL(n).λ with respect to the usual topology of V_n. The degeneration λ → µ is called a direct degeneration if there are no nontrivial chains λ → ν → µ. The degeneration level of an algebra is the maximum length of a chain of direct degenerations.
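As a standard illustration of this notion: with the change-of-basis action (g · µ)(X, Y) = g µ(g^{−1}X, g^{−1}Y), taking g_t = t^{−1} I gives (g_t · µ)(X, Y) = t µ(X, Y) → 0 as t → 0, so every algebra degenerates to the abelian algebra 0; this is the "only non-trivial degeneration" invoked in the proof of Theorem 4.9 below.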
Theorem 4.8 ([9]). An n-dimensional Leibniz algebra is of degeneration level one if and only if it is isomorphic to one of the following:
(1) the Lie algebra µ_hy: µ_hy(X_1, X_i) = X_i, i = 2, ···, n;
(2) the Lie algebra µ_he: µ_he(X_1, X_2) = X_3;
(3) the symmetric Leibniz algebra µ_sy: µ_sy(X_1, X_1) = X_2;
where {X_1, ···, X_n} is a basis.

The following theorem shows that in the frame of Leibniz algebras, the maximum value of F_n : L_n → R is attained at symmetric Leibniz algebras that are non-Lie.

Theorem 4.9. The functional F_n : L_n → R, n ≥ 2, attains its maximal value at a point [µ] ∈ L_n if and only if µ is isomorphic to the symmetric Leibniz algebra µ_sy. In such a case, F_n([µ]) = 20.

Proof. Assume that F_n : L_n → R attains its maximal value at a point [µ] ∈ L_n, n ≥ 2. By Theorem 3.3, we know that [µ] is also a critical point of F_n : PV_n → R. It then follows from Theorem 3.4 that F_n restricted to the orbit GL(n).[µ] attains its minimum value at [µ]; since [µ] is also a maximum point, F_n|_{GL(n).[µ]} is constant, so

GL(n).[µ] = U(n).[µ].   (4.6)
The relation (4.6) implies that the only non-trivial degeneration of µ is 0 ([13, Theorem 5.1]); consequently the degeneration level of µ is 1. It is easy to see that the critical point [µ_hy] is of type (0 < 1; 1, n − 1), [µ_he] is of type (2 < 3 < 4; 2, n − 3, 1), and [µ_sy] is of type (3 < 5 < 6; 1, n − 2, 1). By Proposition 4.4, we know that F_n([µ_hy]) = 4, F_n([µ_he]) = 12 and F_n([µ_sy]) = 20. So the theorem is proved. □
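These three critical values can be checked directly from Proposition 4.4(ii); the short Python sketch below (an illustration assuming nothing beyond that formula, with n chosen large enough that all multiplicities are positive) evaluates it for the three stated types.

def critical_value(ks, ds, n):
    # F_n([mu]) = 4 * (n - (sum k_i d_i)^2 / (sum k_i^2 d_i))^(-1), Proposition 4.4(ii)
    s1 = sum(k * d for k, d in zip(ks, ds))
    s2 = sum(k * k * d for k, d in zip(ks, ds))
    return 4.0 / (n - s1 * s1 / s2)

n = 7  # the values below do not depend on n, as long as n - 3 > 0
print(critical_value([0, 1], [1, n - 1], n))        # mu_hy: 4.0
print(critical_value([2, 3, 4], [2, n - 3, 1], n))  # mu_he: 12.0
print(critical_value([3, 5, 6], [1, n - 2, 1], n))  # mu_sy: 20.0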
4.3. The structure of the critical points of F_n : S_n → R. Note that the maxima and minima of the functional F_n : L_n → R are actually attained at symmetric Leibniz algebras. In the following, we characterize the structure of the critical points of F_n : S_n → R by virtue of the nonnegativity property (see Theorem 4.1).

Theorem 4.10. Let [µ] ∈ S_n be a critical point of F_n : S_n → R with M_µ = c_µ I + D_µ of type (0 < k_2 < ··· < k_r; d_1, d_2, ···, d_r), and consider

l = l_0 ⊕ l_+,   (4.7)

the direct sum of the eigenspaces of D_µ with eigenvalue equal to zero and larger than zero, respectively. Then the following conditions hold:
(i) (L^µ_A)^*, (R^µ_A)^* ∈ Der(µ) for any A ∈ l_0.
(ii) l_0 is a reductive Lie subalgebra.
(iii) l_+ is the nilradical of µ, and it corresponds to a critical point of type (k_2 < ··· < k_r; d_2, ···, d_r) for the functional F_m : S_m → R, where m = dim l_+.

Proof. For (i), since D_µ, L^µ_A and R^µ_A are derivations of µ, we have

[D_µ, L^µ_A] = L^µ_{D_µ A} = 0,   [D_µ, R^µ_A] = R^µ_{D_µ A} = 0,

for any A ∈ l_0. It then follows that

tr M_µ[L^µ_A, (L^µ_A)^*] = tr(c_µ I + D_µ)[L^µ_A, (L^µ_A)^*] = tr D_µ[L^µ_A, (L^µ_A)^*] = tr [D_µ, L^µ_A](L^µ_A)^* = 0.

So (L^µ_A)^* ∈ Der(µ) by Corollary 3.2. Similarly, we have (R^µ_A)^* ∈ Der(µ). This proves (i).

For (ii), let l_0 = h ⊕ z be the orthogonal decomposition, where h = µ(l_0, l_0). We claim that z is the center of l_0.
Indeed, by the orthogonal decomposition of eigenspaces (4.7), we have

L^µ_A = diag(L^µ_A|_{l_0}, L^µ_A|_{l_+}),   R^µ_A = diag(R^µ_A|_{l_0}, R^µ_A|_{l_+}),

for any A ∈ l_0. Since h is Der(l_0)-invariant, by (i) we know that L^µ_A|_{l_0}, R^µ_A|_{l_0} ∈ Der(l_0) are of the form

L^µ_A|_{l_0} = diag(L^µ_A|_h, 0),   R^µ_A|_{l_0} = diag(R^µ_A|_h, 0),

for any A ∈ l_0. So µ(l_0, z) = µ(z, l_0) = 0, i.e., z lies in the center of l_0. Moreover, it follows that h = µ(h, h). Let h = r̄ ⊕ s̄ be the orthogonal decomposition, where s̄ is the radical of h. Since s̄ is Der(h)-invariant, by (i) we know that L^µ_H|_h, R^µ_H|_h ∈ Der(h) are of the form

L^µ_H|_h = diag(L^µ_H|_r̄, L^µ_H|_s̄),   R^µ_H|_h = diag(R^µ_H|_r̄, R^µ_H|_s̄),

for any H ∈ h. Clearly, r̄ is an ideal of h, and h = µ(h, h) = µ(r̄, r̄) ⊕ µ(s̄, s̄). So s̄ = µ(s̄, s̄); since s̄ is solvable, we conclude that s̄ = 0. Therefore h is a semisimple Lie algebra by Theorem 2.5, and we deduce that z is the center of l_0. This proves (ii).

For (iii), it follows from (ii) that s := z ⊕ l_+ is the radical of l. Assume that Z ∈ z belongs to the nilradical of µ; then L^µ_Z, R^µ_Z : l → l are necessarily nilpotent derivations of l. By (i), for any Z ∈ z the derivations (L^µ_Z)^*, (R^µ_Z)^* vanish on l_0, and in particular (L^µ_Z)^* Z = 0, (R^µ_Z)^* Z = 0.
Hence [(L^µ_Z)^*, L^µ_Z] = L^µ_{(L^µ_Z)^* Z} = 0 and [(R^µ_Z)^*, R^µ_Z] = R^µ_{(R^µ_Z)^* Z} = 0. That is, L^µ_Z and R^µ_Z are both normal and nilpotent operators, so L^µ_Z = R^µ_Z = 0, i.e., Z lies in the center of l. This, however, contradicts Z ∈ l_0, since by the proof of Theorem 4.1 a nonzero central eigenvector has positive D_µ-eigenvalue. So Z = 0 and l_+ is the nilradical of l.

Set n := l_+ and denote by µ_n the corresponding element of S_m, where m = dim l_+. Assume that {A_i} is an orthonormal basis of l_0; then by (3.8) we have

M_µ|_n = M_{µ_n} + 2 Σ_i ([L^µ_{A_i}, (L^µ_{A_i})^*] + [R^µ_{A_i}, (R^µ_{A_i})^*])|_n.   (4.8)

Using (i) and Corollary 3.2, it follows that

tr M_{µ_n}[L^µ_{A_i}, (L^µ_{A_i})^*]|_n = tr M_{µ_n}[R^µ_{A_i}, (R^µ_{A_i})^*]|_n = 0.

Since tr M_µ[L^µ_{A_i}, (L^µ_{A_i})^*] = tr M_µ[R^µ_{A_i}, (R^µ_{A_i})^*] = 0, by (4.8) we have

tr M_µ[L^µ_{A_i}, (L^µ_{A_i})^*] = tr M_µ|_n [L^µ_{A_i}, (L^µ_{A_i})^*]|_n = 0,
tr M_µ[R^µ_{A_i}, (R^µ_{A_i})^*] = tr M_µ|_n [R^µ_{A_i}, (R^µ_{A_i})^*]|_n = 0.

Put T = Σ_i ([L^µ_{A_i}, (L^µ_{A_i})^*] + [R^µ_{A_i}, (R^µ_{A_i})^*])|_n; then we have tr T^2 = 0. Since T is Hermitian, we conclude that T = 0. So n = l_+ corresponds to a critical point of type (k_2 < ··· < k_r; d_2, ···, d_r) for the functional F_m : S_m → R. □
In fact, it follows from the proof of Theorem 4.10 that L^µ_Z and R^µ_Z are normal operators for any Z ∈ z(l_0). Next, we characterize the critical points that lie in S_n in terms of those which are nilpotent.

Theorem 4.11 (Solvable extension). Assume that a is an abelian Lie algebra of dimension d_1, and [λ] is a critical point of F_m : S_m → R of type (k_2 < ··· < k_r; d_2, ···, d_r), where k_2 > 0. Consider the direct sum µ = a ⋉_ρ λ, where ρ = (L^ρ, R^ρ), and L^ρ : C^{d_1} × C^m → C^m, R^ρ : C^m × C^{d_1} → C^m are bilinear mappings such that µ is a symmetric Leibniz algebra with bracket relations given by

µ(A + X, B + Y) := L^ρ_A(Y) + R^ρ_B(X) + λ(X, Y)

for all A, B ∈ C^{d_1}, X, Y ∈ C^m. Assume that the following conditions are satisfied:
(i) [D_λ, L^ρ_A] = 0, [D_λ, R^ρ_A] = 0 for all A ∈ C^{d_1};
(ii) [L^ρ_A, (L^ρ_A)^*] = 0, [R^ρ_A, (R^ρ_A)^*] = 0 for all A ∈ C^{d_1}, and for each 0 ≠ A ∈ C^{d_1}, L^ρ_A or R^ρ_A is not zero.
If we extend the Hermitian inner product on C^m by setting

⟨A, B⟩ = −(2/c_λ)(tr L^ρ_A (L^ρ_B)^* + tr R^ρ_A (R^ρ_B)^*),   A, B ∈ C^{d_1},

then [µ] is a solvable critical point of type (0 < k_2 < ··· < k_r; d_1, d_2, ···, d_r) for F_n : S_n → R, n = d_1 + m.

Proof. Put n = (C^m, λ), and let {X_i} be an orthonormal basis of C^m. It follows from (ii) that (L^ρ_A)^*, (R^ρ_A)^* ∈ Der(λ) for all A ∈ C^{d_1}.
Then, for any A ∈ C^{d_1} and X ∈ C^m, we have

⟨M_µ X, A⟩ = −2 Σ_{i,j} ⟨µ(X_i, X), X_j⟩⟨µ(X_i, A), X_j⟩ − 2 Σ_{i,j} ⟨µ(X, X_i), X_j⟩⟨µ(A, X_i), X_j⟩
 = −2 Σ_{i,j} ⟨λ(X_i, X), X_j⟩⟨µ(X_i, A), X_j⟩ − 2 Σ_{i,j} ⟨λ(X, X_i), X_j⟩⟨µ(A, X_i), X_j⟩
 = −2 tr (R^ρ_A)^* R^λ_X − 2 tr (L^ρ_A)^* L^λ_X = 0,

since λ is nilpotent and (L^ρ_A)^*, (R^ρ_A)^* ∈ Der(λ). So M_µ leaves a and n invariant, and moreover it is not hard to see that M_µ|_n = M_λ = c_λ I + D_λ by (3.8). On the other hand, we have

⟨M_µ A, B⟩ = −2 Σ_{i,j} ⟨µ(X_i, A), X_j⟩⟨µ(X_i, B), X_j⟩ − 2 Σ_{i,j} ⟨µ(A, X_i), X_j⟩⟨µ(B, X_i), X_j⟩
 = −2 (tr L^ρ_A (L^ρ_B)^* + tr R^ρ_A (R^ρ_B)^*) = c_λ ⟨A, B⟩,

for any A, B ∈ C^{d_1}. So M_µ = c_µ I + D_µ, where c_µ = c_λ and D_µ = diag(0, D_λ) ∈ Der(µ). This completes the proof. □
Theorem 4.12 (General extension). Assume that f = h ⊕ z is a reductive Lie algebra of dimension d_1, and [λ] is a critical point of F_m : S_m → R of type (k_2 < ··· < k_r; d_2, ···, d_r), where k_2 > 0. Consider the direct sum µ = f ⋉_ρ λ, where ρ = (L^ρ, R^ρ), and L^ρ : C^{d_1} × C^m → C^m, R^ρ : C^m × C^{d_1} → C^m are bilinear mappings such that µ is a symmetric Leibniz algebra with bracket relations given by

µ(A + X, B + Y) := (ad_f A)(B) + L^ρ_A(Y) + R^ρ_B(X) + λ(X, Y)

for all A, B ∈ C^{d_1}, X, Y ∈ C^m. Assume that the following conditions are satisfied:
(i) [D_λ, L^ρ_A] = 0, [D_λ, R^ρ_A] = 0 for all A ∈ C^{d_1};
(ii) [L^ρ_Z, (L^ρ_Z)^*] = 0, [R^ρ_Z, (R^ρ_Z)^*] = 0 for all Z ∈ z, and for each 0 ≠ Z ∈ z, L^ρ_Z or R^ρ_Z is not zero.
Let ⟨·, ·⟩_1 be a Hermitian inner product on f and let {H_i | H_i ∈ h} ∪ {Z_i | Z_i ∈ z} be an orthonormal basis of (f, ⟨·, ·⟩_1) such that (ad_f H_i)^{*_1} = − ad_f H_i, (L^ρ_{H_i})^* = −L^ρ_{H_i}, (R^ρ_{H_i})^* = −R^ρ_{H_i} for all i. If we extend the Hermitian inner product on C^m by setting

⟨A, B⟩ = −(2/c_λ)(tr ad_f A (ad_f B)^{*_1} + tr L^ρ_A (L^ρ_B)^* + tr R^ρ_A (R^ρ_B)^*),   A, B ∈ C^{d_1},

then [µ] is a critical point of type (0 < k_2 < ··· < k_r; d_1, d_2, ···, d_r) for F_n : S_n → R, n = d_1 + m.

Proof. Put n = (C^m, λ), let {A_i} = {H_i} ∪ {Z_i} be the orthonormal basis of (C^{d_1}, ⟨·, ·⟩_1) as in the hypothesis, and let {X_i} be an orthonormal basis of C^m. Then for any A ∈ C^{d_1} and X ∈ C^m, we have

⟨M_µ X, A⟩ = −2 Σ_{i,j} ⟨µ(X_i, X), X_j⟩⟨µ(X_i, A), X_j⟩ − 2 Σ_{i,j} ⟨µ(X, X_i), X_j⟩⟨µ(A, X_i), X_j⟩
 = −2 Σ_{i,j} ⟨λ(X_i, X), X_j⟩⟨µ(X_i, A), X_j⟩ − 2 Σ_{i,j} ⟨λ(X, X_i), X_j⟩⟨µ(A, X_i), X_j⟩
 = −2 tr (R^ρ_A)^* R^λ_X − 2 tr (L^ρ_A)^* L^λ_X = 0,

since λ is nilpotent and (L^ρ_A)^*, (R^ρ_A)^* ∈ Der(λ). So M_µ leaves f and n invariant, and it is not hard to see that M_µ|_n = M_λ = c_λ I + D_λ by (3.8). Moreover, for any A, B ∈ C^{d_1}, we have

⟨M_µ A, B⟩ = 2 Σ_{i,j} ⟨µ(A_i, A_j), A⟩⟨µ(A_i, A_j), B⟩ − 2 Σ_{i,j} ⟨µ(A_i, A), A_j⟩⟨µ(A_i, B), A_j⟩ − 2 Σ_{i,j} ⟨µ(X_i, A), X_j⟩⟨µ(X_i, B), X_j⟩
  − 2 Σ_{i,j} ⟨µ(A, A_i), A_j⟩⟨µ(B, A_i), A_j⟩ − 2 Σ_{i,j} ⟨µ(A, X_i), X_j⟩⟨µ(B, X_i), X_j⟩
 = −2 (tr ad_f A (ad_f B)^{*_1} + tr L^ρ_A (L^ρ_B)^* + tr R^ρ_A (R^ρ_B)^*) = c_λ ⟨A, B⟩.

So M_µ = c_µ I + D_µ, where c_µ = c_λ and D_µ = diag(0, D_λ) ∈ Der(µ). This completes the proof. □

5. Examples

In this section, we classify the critical points of the functional F_n : S_n → R for n = 2 and 3, respectively.
We show that every two-dimensional symmetric Leibniz algebra is isomorphic to a critical point of F_2, and that there exist three-dimensional symmetric Leibniz algebras which are not isomorphic to any critical point of F_3.

5.1. Two-dimensional case. Note that there are only two non-abelian two-dimensional symmetric Leibniz algebras up to isomorphism, given by
    Lie: [e1, e2] = e2;   non-Lie: [e1, e1] = e2.
It is easy to see that the Lie algebra is a critical point of F_2 with type (0 < 1; 1, 1), and the critical value is 4; the non-Lie symmetric Leibniz algebra is a critical point of F_2 with type (1 < 2; 1, 1), and the critical value is 20.

5.2. Three-dimensional case. The classification of 3-dimensional Leibniz algebras over C can be found in [1, 6]. We classify the critical points of the functional F_3 : S_3 → R in the following table.

TABLE I. Non-zero 3-dimensional symmetric Leibniz algebras, critical types and critical values.
  g               Type      Multiplication table                                 Critical type         Critical value
  L1              Lie       [e1, e2] = e3                                        (1 < 2; 2, 1)         12
  L2              Lie       [e1, e2] = e2                                        (0 < 1; 1, 2)         4
  L3(α), α ≠ 0    Lie       [e3, e1] = e1, [e3, e2] = αe2                        (0 < 1; 1, 2)         4
  L4              Lie       [e3, e1] = e1 + e2, [e3, e2] = e2                    −                     −
  L5              Lie       [e3, e1] = 2e1, [e3, e2] = −2e2, [e1, e2] = e3       (0; 3)                4/3
  S1              non-Lie   [e3, e3] = e1                                        (3 < 5 < 6; 1, 1, 1)  20
  S2              non-Lie   [e2, e2] = e1, [e3, e3] = e1                         (1 < 2; 2, 1)         12
  S3(2)           non-Lie   [e2, e2] = 2e1, [e3, e2] = e1, [e3, e3] = e1         −                     −
  S3(β), β ≠ 2    non-Lie   [e2, e2] = βe1, [e3, e2] = e1, [e3, e3] = e1         (1 < 2; 2, 1)         12
  S4              non-Lie   [e1, e3] = e1                                        (0 < 1; 1, 2)         4
  S5(α), α ≠ 0    non-Lie   [e1, e3] = αe1, [e2, e3] = e2, [e3, e2] = −e2        (0 < 1; 1, 2)         4
  S6              non-Lie   [e2, e3] = e2, [e3, e2] = −e2, [e3, e3] = e1         −                     −
  S7(α), α ≠ 0    non-Lie   [e1, e3] = αe1, [e2, e3] = e2                        (0 < 1; 1, 2)         4
  S8              non-Lie   [e1, e3] = e1 + e2, [e3, e3] = e1                    −                     −
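As a quick cross-check of the multiplication tables above, the short sketch below (our own illustration, not part of the original paper) verifies for the entries S1, S2 and S3(β), whose nonzero products all lie in the span of e1, that the bracket satisfies both the left and the right Leibniz identities while failing antisymmetry, so these are indeed non-Lie symmetric Leibniz algebras. The structure constants are read off from Table I with all unlisted products set to zero, the identities are taken in the standard conventions x(yz) = (xy)z + y(xz) and (xy)z = (xz)y + x(yz), and all helper names are ours.

```python
import numpy as np
from itertools import product

def bracket(c, u, v):
    """Bilinear extension of the structure constants c[i][j] = [e_i, e_j]."""
    return sum(u[i] * v[j] * np.asarray(c[i][j], dtype=float)
               for i, j in product(range(3), repeat=2))

def check_symmetric_leibniz(c):
    e = np.eye(3)
    b = lambda u, v: bracket(c, u, v)
    left = right = antisym = True
    for x, y, z in product(e, repeat=3):
        left  &= np.allclose(b(x, b(y, z)), b(b(x, y), z) + b(y, b(x, z)))   # left Leibniz
        right &= np.allclose(b(b(x, y), z), b(b(x, z), y) + b(x, b(y, z)))   # right Leibniz
    for x, y in product(e, repeat=2):
        antisym &= np.allclose(b(x, y), -b(y, x))                            # Lie would need this
    return left, right, antisym

zero, e1 = [0, 0, 0], [1, 0, 0]
beta = 3.0   # any β; β = 2 only affects criticality, not the identities

algebras = {
    "S1":    [[zero]*3, [zero]*3, [zero, zero, e1]],                  # [e3,e3] = e1
    "S2":    [[zero]*3, [zero, e1, zero], [zero, zero, e1]],          # [e2,e2] = [e3,e3] = e1
    "S3(β)": [[zero]*3, [zero, [beta, 0, 0], zero], [zero, e1, e1]],  # [e2,e2]=βe1, [e3,e2]=[e3,e3]=e1
}

for name, c in algebras.items():
    left, right, antisym = check_symmetric_leibniz(c)
    print(f"{name:6s} left Leibniz: {left}, right Leibniz: {right}, antisymmetric (Lie): {antisym}")
```

The same routine can be applied to the remaining entries once all the products implied by the classification of [1, 6] are filled in; here we only spot-check the central-extension-type algebras.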
6. Some questions
By Theorem 4.1, we know that eigenvalue types for the critical points of F_n : S_n → R are necessarily nonnegative. From Theorem 4.6 and Theorem 4.9, we know that the maxima and minima of the functional F_n : L_n → R are actually attained at the symmetric Leibniz algebras. So it is natural and interesting to ask the following questions.

Question 6.1. Do all critical points of F_n : L_n → R necessarily have nonnegative eigenvalue types?

Question 6.2. Do all critical points of F_n : L_n → R necessarily lie in S_n?

Note that if Question 6.2 holds, then Question 6.1 holds.

7. Acknowledgement
This paper is partially supported by NSFC (11931009 and 12131012) and NSF of Tianjin (19JCYBJC30600).

8. Data Availability Statements
The authors declare that all data supporting the findings of this study are available within the article.

References
[1] Ayupov, Sh. A.; Omirov, B. A.: On Leibniz algebras, in: Algebra and Operator Theory (Tashkent, 1997), Kluwer Acad. Publ., Dordrecht, 1998, pp. 1–12.
[2] Barnes, D. W.: On Levi's theorem for Leibniz algebras, Bull. Austral. Math. Soc. 86 (2012), no. 2, 184–185.
[3] Bloh, A.: On a generalization of the concept of Lie algebra (in Russian), Dokl. Akad. Nauk SSSR 165 (1965), 471–473; translated into English in Soviet Math. Dokl. 6 (1965), 1450–1452.
[4] Böhm, C.; Lafuente, R. A.: Immortal homogeneous Ricci flows, Invent. Math. 212 (2018), no. 2, 461–529.
[5] Bonezzi, R.; Hohm, O.: Leibniz gauge theories and infinity structures, Commun. Math. Phys. 377 (2020), 2027–2077.
[6] Casas, J. M.; Insua, M. A.; Ladra, M.; Ladra, S.: An algorithm for the classification of 3-dimensional complex Leibniz algebras, Linear Algebra Appl. 436 (2012), no. 9, 3747–3756.
[7] Feldvoss, J.: Leibniz algebras as nonassociative algebras. In: Vojtechovsky, P.; Bremner, M. R.; Carter, J. S.; Evans, A. B.; Huerta, J.; Kinyon, M. K.; Moorhouse, G. E.; Smith, J. D. H. (eds.), Nonassociative mathematics and its applications, Vol. 721, American Mathematical Society, Providence, RI, 2019, pp. 115–149.
[8] Hohm, O.; Samtleben, H.: Leibniz-Chern-Simons theory and phases of exceptional field theory, Commun. Math. Phys. 369 (2019), 1055–1089.
[9] Khudoyberdiyev, A.; Omirov, B.: The classification of algebras of level one, Linear Algebra Appl. 439 (2013), no. 11, 3460–3463.
[10] Kirwan, K.: Momentum maps and reduction in algebraic geometry, Differ. Geom. Appl. 9 (1998), 135–172.
[11] Kotov, A.; Strobl, T.: The embedding tensor, Leibniz-Loday algebras, and their higher gauge theories, Commun. Math. Phys. 376 (2020), 235–258.
[12] Lauret, J.: On the moment map for the variety of Lie algebras, J. Funct. Anal. 202 (2003), 392–423.
[13] Lauret, J.: Degenerations of Lie algebras and geometry of Lie groups, Differ. Geom. Appl. 18 (2003), no. 2, 177–194.
[14] Lauret, J.: Einstein solvmanifolds are standard, Ann. Math. 172 (2010), 1859–1877.
[15] Lauret, J.: Ricci soliton solvmanifolds, J. Reine Angew. Math. 650 (2011), 1–21.
[16] Lavau, S.: Tensor hierarchies and Leibniz algebras, J. Geom. Phys. 144 (2019), 147–189.
[17] Lavau, S.; Palmkvist, J.: Infinity-enhancing Leibniz algebras, Lett. Math. Phys. 110 (2020), 3121–3152.
[18] Loday, J.-L.: Une version non commutative des algèbres de Lie: les algèbres de Leibniz [A noncommutative version of Lie algebras: the Leibniz algebras], Enseign. Math. (2) 39 (1993), no. 3–4, 269–293.
[19] Loday, J.-L.; Pirashvili, T.: Universal enveloping algebras of Leibniz algebras and (co)homology, Math. Ann. 296 (1993), no. 1, 139–158.
[20] Mason, G.; Yamskulna, G.: Leibniz algebras and Lie algebras, SIGMA Symmetry Integrability Geom. Methods Appl. 9 (2013), Paper 063, 10 pp.
[21] Ness, L.: A stratification of the null cone via the moment map, Amer. J. Math. 106 (1984), 1281–1329 (with an appendix by D. Mumford).
[22] Sheng, Y.; Tang, R.; Zhu, C.: The controlling L∞-algebra, cohomology and homotopy of embedding tensors and Lie-Leibniz triples, Commun. Math. Phys. 386 (2021), 269–304.
[23] Strobl, T.: Leibniz-Yang-Mills gauge theories and the 2-Higgs mechanism, Phys. Rev. D 99 (2019), 115026.
[24] Strobl, T.; Wagemann, F.: Enhanced Leibniz algebras: structure theorem and induced Lie 2-algebra, Commun. Math. Phys. 376 (2020), 51–79.
[25] Towers, D. A.: On the nilradical of a Leibniz algebra, Commun. Algebra 49 (2021), no. 10, 4345–4347.
[26] Zhang, H.; Chen, Z.; Li, L.: The moment map for the variety of 3-Lie algebras, to appear in J. Funct. Anal., 2022.

(Zhiqi Chen) School of Mathematics and Statistics, Guangdong University of Technology, Guangzhou 510520, P.R. China
Email address: chenzhiqi@nankai.edu.cn
(Saiyu Wang) School of Mathematical Sciences and LPMC, Nankai University, Tianjin 300071, P.R. China
Email address: 2120200040@mail.nankai.edu.cn
(Hui Zhang) School of Mathematics, Southeast University, Nanjing 210096, P.R. China
Email address: 2120160023@mail.nankai.edu.cn
diff --git a/oNE3T4oBgHgl3EQfLAnj/content/2301.04360v1.pdf b/oNE3T4oBgHgl3EQfLAnj/content/2301.04360v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f125c76c5be4dd00910b94c8a481c98d7fb7f83c --- /dev/null +++ b/oNE3T4oBgHgl3EQfLAnj/content/2301.04360v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f14e2ca9b81e262519d1aab6dd7cd31491f9cf8aeeff7fc3bce10a33ba23551 +size 408732 diff --git a/oNE3T4oBgHgl3EQfLAnj/vector_store/index.pkl b/oNE3T4oBgHgl3EQfLAnj/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..cea36e75e00572781ca2d3c8cfebf597bb3db9be --- /dev/null +++ b/oNE3T4oBgHgl3EQfLAnj/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0d5fa533d0d2bd41c5c1ae3abbb22c9a75a6b5f74a6a066fbd9c658f9803b73 +size 98796 diff --git a/oNFIT4oBgHgl3EQfuyvd/content/tmp_files/2301.11345v1.pdf.txt b/oNFIT4oBgHgl3EQfuyvd/content/tmp_files/2301.11345v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..09e559840494840b1742a6197ee3ce7b16c83571 --- /dev/null +++ b/oNFIT4oBgHgl3EQfuyvd/content/tmp_files/2301.11345v1.pdf.txt @@ -0,0 +1,1847 @@

Detectable Gravitational Wave from Graviton Bremsstrahlung during Reheating

Basabendu Barman,^a Nicolás Bernal,^b Yong Xu,^c and Óscar Zapata^d

^a Institute of Theoretical Physics, Faculty of Physics, University of Warsaw, ul. Pasteura 5, 02-093 Warsaw, Poland
^b New York University Abu Dhabi, PO Box 129188, Saadiyat Island, Abu Dhabi, United Arab Emirates
^c PRISMA+ Cluster of Excellence and Mainz Institute for Theoretical Physics, Johannes Gutenberg University, 55099 Mainz, Germany
^d Instituto de Física, Universidad de Antioquia, Calle 70 # 52-21, Apartado Aéreo 1226, Medellín, Colombia

E-mail: basabendu88barman@gmail.com, nicolas.bernal@nyu.edu, yonxu@uni-mainz.de, oalberto.zapata@udea.edu.co

Abstract. We revisit graviton production via Bremsstrahlung from the decay of the inflaton during inflationary reheating. Using two complementary computational techniques, we first show that such 3-body differential decay rates differ from previously reported results in the literature. We then compute the stochastic gravitational wave (GW) background that forms during the period of reheating, when the inflaton perturbatively decays with the radiative emission of gravitons. By computing the number of relativistic degrees of freedom in terms of ∆Neff, we constrain the resulting GW energy density from BBN and CMB. Finally, we project current and future GW detector sensitivities in probing such a stochastic GW background, which typically peaks in the GHz to THz ballpark, opening up the opportunity to be detected with microwave cavities and space-based GW detectors.
arXiv:2301.11345v1 [hep-ph] 26 Jan 2023

Contents
1  Introduction
2  The Framework
   2.1  Decay into Scalars
   2.2  Decay into Fermions
   2.3  Decay into Vectors
3  Gravitational Wave Contribution to ∆Neff
4  Gravitational Wave Spectrum
5  Conclusions
A  Feynman Rules for Relevant Vertices
B  Calculation of the Decay Widths
   B.1  Scalar Case
        B.1.1  Polarization Tensor in Explicit Form
        B.1.2  Polarization Sum
   B.2  Fermionic Case
   B.3  Vector Case

1  Introduction

The existence of a primordial gravitational wave (GW) background is one of the most crucial predictions of the inflationary scenario of the early universe. Stochastic GWs can have several sources, viz., from the quantum fluctuations during inflation [1–4] that give rise to tensor perturbations, during preheating [5–9] when rapid particle production via parametric resonance occurs, or from oscillations of cosmic string loops [10–13], originated from, for example, a spontaneously broken U(1) symmetry (gauged or global). However, as pointed out in Refs. [14, 15], stochastic GWs of primordial origin can be sourced from the decay of the inflaton.¹ In that case, after the end of inflation, during the era of reheating, the inflaton field can decay into particles of arbitrary spins, depending on the microscopic nature of its interaction. Considering gravitons to emerge as quantum fluctuations over the classical background, they inexorably couple to matter, leading to a graviton production from inflaton decays, similar to the Bremsstrahlung process as considered in Ref. [17]. It is then unavoidable to have inflaton decay as a source of the primordial GW background.

¹ Such graviton can also act as a mediator in the production of the dark matter relic abundance [16].

With this motivation, in this work, we revisit the scenario in which the inflaton can interact with bosons or fermions, leading to its perturbative decay during reheating, resulting in the production of a standard model (SM) radiation bath. Here, we would like to emphasize that inflaton decay via trilinear couplings fully drains the inflaton energy, allowing the Universe to transit into a radiation-domination phase [18]. By considering fluctuations over a flat background, we introduce the dynamical (massless) graviton field of spin 2 that communicates with all other matter fields through the energy-momentum tensor. This eventually leads to 3-body decay of the inflaton, involving a pair of scalars, fermions, or vector bosons, along with the radiative emission of a graviton. In computing the 3-body decay widths, we follow two complementary approaches: a) we explicitly construct the graviton polarization tensors, and b) we utilize the polarization sum, and show that our expressions converge in either case, however, differing from previous analyses reported in Refs. [14, 15, 19]. It is then possible to compute the GW energy density from the differential 3-body decay width of the inflaton.
As is well known, in order for Big Bang Nucleosynthesis (BBN) to proceed successfully, the energy budget of the Universe must not comprise a significant amount of extra relativistic species, including GWs. This condition requires that the energy fraction of GWs relative to the SM radiation degrees of freedom (DoF) at that time is not greater than about ∼ 10%. Regardless of its origin, the energy density in GW established before BBN acts as radiation, and thus its impact on BBN is fully captured by ∆Neff, which counts the number of relativistic species. Furthermore, GWs with initial adiabatic conditions leave the same imprint on the CMB as free-streaming dark radiation, and in this case, the limit on the present-day energy density in GWs is Ω^(0)_GW h² < 1.3 × 10⁻⁶ [20, 21]. We discuss the impact of the CMB measurement of ∆Neff on the GW energy density emitted from the decay of the inflaton, taking into account the evolution of the energy densities during reheating. We compare the predicted spectrum of stochastic GWs with existing and future experiments, finding that the present GW spectrum strongly requires high-frequency GW detectors. Interestingly, we see that such high-frequency GWs could be detected, for example, with resonant cavity detectors [22, 23] or with space-based futuristic GW detectors [24, 25].

The paper is organized as follows. We present the underlying interaction Lagrangian and the 2- and 3-body decay rates in Section 2. In Section 3 we calculate the constraints from ∆Neff on the GW energy density. The computation of the primordial GW spectrum is presented in Section 4. Finally, we conclude in Section 5. In the appendixes, we present our calculations in detail.

2  The Framework

The underlying interaction Lagrangian for the present set-up can be divided into two parts. One part involves a trilinear interaction between the inflaton φ and a pair of complex scalar doublets ϕ with 4 DoF, a pair of vector-like Dirac fermions ψ with 4 DoF, or a pair of massive vector bosons V_µ with 3 DoF, given by

    L^(2)_int ⊃ −µ φ |ϕ|² − y_ψ ψ̄ ψ φ − g_V V_µ V^µ φ ,    (2.1)

where the corresponding interaction strengths are parameterized in terms of the couplings µ, y_ψ, and g_V, respectively. The superscript (2) denotes interactions that lead to a two-body decay of the inflaton. Also, note that the couplings µ and g_V have mass dimension, while the Yukawa interaction strength y_ψ is dimensionless. Here, we remain agnostic about the underlying UV-complete Lagrangian and, for simplicity, work with an effective theory.

On the other hand, since we are interested in the unavoidable Bremsstrahlung process involving gravitons, we expand the metric g_µν around Minkowski spacetime: g_µν ≃ η_µν + (2/M_P) h_µν, where M_P is the reduced Planck mass. This inevitably leads to gravitational interactions that are described by the Lagrangian [26]

    √−g L^(g)_int ⊃ −(2/M_P) h_µν T^µν ,    (2.2)

where h_µν refers to the graviton field that appears as a quantum fluctuation on the flat background, and T_µν represents the energy-momentum tensor involving all matter particles involved in the theory. Further, we do not consider any non-minimal coupling between the new fields of the theory and gravity; hence, this is a minimal scenario. All relevant Feynman rules involving the graviton and particles of different spins (0, 1/2, and 1) are elaborated in Appendix A. The interactions appearing in Eqs. (2.1) and (2.2) give rise to 2- and 3-body decays of the inflaton into pairs of ϕ, ψ, and V in the final state, along with the emission of a massless graviton. After production, gravitons propagate and constitute the stochastic GW background, the spectrum of which we shall compute, considering different spins of the final-state products.
With this setup, we now move on to the discussion of three different decay scenarios, where the inflaton φ perturbatively decays into either a pair of bosons or a pair of fermions, with graviton radiation, due to the graviton-matter coupling. In the following sections, we discuss the three cases individually.

2.1  Decay into Scalars

We start with the inflaton decay into spin-0 states, where the final-state particles are considered to be complex doublet scalars, e.g. the SM Higgs doublet. The 2-body decay rate in this case, following the Lagrangian in Eq. (2.1), is given by

    Γ^(0)_0 = (2M/16π) (µ/M)² √(1 − 4y²) ,    (2.3)

where y ≡ m/M, with m being the mass of the daughter particles (independent of their spin). The factor of 2 appears because of the two possible decay channels for the complex scalar doublet in the final state. The subscript represents the spin of the final-state particles, while the superscript (0) denotes the 2-body decay width.

As advocated before, due to the irreducible gravitational interaction (cf. Eq. (2.2)), the final state could also contain a graviton [14], leading to a 3-body decay of φ. The general three-body decay diagrams are shown in Fig. 1, with l, ω, p, and q denoting the initial and final four-momenta, respectively.² Here, we denote any general final state as F, where F can be a scalar, a fermion, or a gauge boson. The detailed computation of the 3-body decay following two different methodologies, namely the explicit construction of graviton polarization tensors and the polarization sum, is reported in Appendix B. The differential decay rate for the scalar final state with the emission of a graviton of energy E_ω reads

    dΓ^(1)_0/dE_ω = (1/32π³) (µ/M_P)² [ (1 − 2x)(1 − 2x + 2y²)/(4x α⁻¹) + (y²(y² + 2x − 1)/x) ln((1 + α)/(1 − α)) ] ,    (2.4)

with x ≡ E_ω/M and

    α ≡ √(1 − 4y²/(1 − 2x)) ,    (2.5)

with a graviton energy spanning the range

    0 < E_ω ≤ M (1/2 − 2y²) .    (2.6)

² The amplitude of the bottom right diagram is proportional to η_µν ϵ^µν (cf. Eq. (A.3)) and therefore vanishes due to the traceless condition for a massless graviton.

Figure 1. Feynman diagrams for an inflaton decay into a pair of particles F, along with a radiated graviton. Here F could be a scalar ϕ, a fermion ψ, or a vector V, while h_µν is the graviton tensor field. We denote the incoming and outgoing momenta with dashed arrowheads.

Since the differential rate in Eq. (2.4) plays a key role in our subsequent calculation, we would like to make some remarks before proceeding. Note that a graviton could carry at most half of the inflaton energy, which occurs in a limit where the daughter particle mass is zero, namely y → 0. In such a case, the differential decay rate vanishes as the phase space closes. More generally, the differential decay rate should also vanish when x → 1/2 − 2y². We notice that our result differs from that reported in Eq. (7) of Ref. [14].
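To make the two remarks above concrete, the following sketch (our own illustration, with illustrative parameter values) evaluates the square bracket of Eq. (2.4) numerically: it tends to zero at the kinematic endpoint x → 1/2 − 2y², and for y → 0 it reduces to (1 − 2x)²/(4x).

```python
import numpy as np

def bracket_scalar(x, y):
    """Square bracket of Eq. (2.4), with alpha as in Eq. (2.5)."""
    a = np.sqrt(1 - 4*y**2/(1 - 2*x))
    return (a*(1 - 2*x)*(1 - 2*x + 2*y**2)/(4*x)
            + y**2*(y**2 + 2*x - 1)/x * np.log((1 + a)/(1 - a)))

y = 0.1
x_end = 0.5 - 2*y**2                       # kinematic endpoint, Eq. (2.6)
for x in [0.1, 0.3, 0.99*x_end, 0.999999*x_end]:
    print(f"x = {x:.6f}:  bracket = {bracket_scalar(x, y):.3e}")   # -> 0 as x -> x_end

# light daughters: the bracket approaches the massless-limit expression (1-2x)^2/(4x)
print(bracket_scalar(0.25, 1e-4), (1 - 2*0.25)**2/(4*0.25))
```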
2.2  Decay into Fermions

Following the second term in Eq. (2.1), we compute the 2-body decay of φ into a pair of fermions in the final state. In that case, the decay width is given by

    Γ^(0)_{1/2} = (y_ψ²/8π) M (1 − 4y²)^{3/2} .    (2.7)

As before, one can compute the differential rate of the three-body final state involving a pair of ψ's and a graviton in the final state, leading to

    dΓ^(1)_{1/2}/dE_ω = (y_ψ²/64π³) (M/M_P)² [ ((1 − 2x)/(x α⁻¹)) (8x y² + 2x(x − 1) − 8y⁴ − 2y² + 1) + (4y²((5 − 8x)y² − (x − 1)² − 4y⁴)/x) ln((1 + α)/(1 − α)) ] ,    (2.8)

see Appendix B.2 for details. Interestingly, we again find that our expression for the 3-body decay rate differs from those reported in Eq. (8) of Ref. [14] and Eq. (B.1) of Ref. [19].

2.3  Decay into Vectors

For inflaton decays to massive vectors via the trilinear interaction term φ V_µ V^µ, the 2-body decay rate is given by

    Γ^(0)_1 = (M/64π) (g_V/M)² ((1 − 4y² + 12y⁴)/y⁴) √(1 − 4y²) ,    (2.9)

while the 3-body differential decay rate reads (see Appendix B.3 for details of the computation)

    dΓ^(1)_1/dE_ω = (1/(1920π³ x y⁴)) (g_V/M_P)² { α [ 360(1 − 2x)y⁶ + 4(4x(23x − 5) + 15)y⁴ + 2(2x − 1)(28x(14x − 5) + 15)y² + (1 − 2x)²(4x(2x − 5) + 15) ] + 60y² [ 12y⁶ + 16(x − 1)y⁴ + (5 + 4x(4x − 3))y² − (1 − 2x)²(1 + 2x) ] ln((1 + α)/(1 − α)) } .    (2.10)

Note that the factor 1/y⁴ comes from the polarization sum for the massive vector, and therefore the massless case cannot be recovered in the limit y → 0. We would like to mention that our results in Eqs. (2.9) and (2.10) differ from the ones reported in Eqs. (4) and (7) of Ref. [15].
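For orientation, the 2-body widths above also set the reheating temperature once one imposes the standard condition Γ^(0) ≃ H(T_rh), which is the condition used at the end of Sec. 3 below, together with the radiation energy density and Hubble rate given there. The snippet below is a rough estimate of our own: the inflaton mass and coupling values are illustrative assumptions, not values fixed by the paper, and O(1) factors from ρ_φ(T_rh) = ρ_R(T_rh) are ignored.

```python
import numpy as np

MP, gstar = 2.435e18, 106.75            # reduced Planck mass [GeV], SM relativistic dof

def Trh_from_width(Gamma):
    """T_rh from Gamma = H(T_rh), with H^2 ~ (pi^2/90) g_* T^4 / MP^2 (rough estimate)."""
    return (90 / (np.pi**2 * gstar))**0.25 * np.sqrt(Gamma * MP)

M = 2.4e17                              # GeV, illustrative inflaton mass (~ MP/10)
# massless daughters assumed, so sqrt(1 - 4y^2) -> 1 in Eqs. (2.3) and (2.7)
Gamma_scalar  = 2*M/(16*np.pi) * (1.0e13/M)**2      # Eq. (2.3) with µ  = 1e13 GeV (assumed)
Gamma_fermion = (1.0e-5)**2 * M/(8*np.pi)           # Eq. (2.7) with yψ = 1e-5    (assumed)

for name, G in [("scalar  (mu = 1e13 GeV)", Gamma_scalar),
                ("fermion (y_psi = 1e-5) ", Gamma_fermion)]:
    print(f"{name}:  Gamma = {G:.2e} GeV,  T_rh ~ {Trh_from_width(G):.2e} GeV")
```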
Within the SM, the prediction taking into account the +non-instantaneous neutrino decoupling is Neff (SM) = 3.046 [32–36], whereas the presence of +GWs implies +∆Neff ≡ Neff − NSM +eff = 8 +7 +�11 +4 +� 4 +3 ρGW(T∆Neff) +ργ(T∆Neff) += 8 +7 +�11 +4 +g⋆s(T∆Neff) +g⋆s(Trh) +� 4 +3 g⋆(Trh) +2 +ρGW(Trh) +ρR(Trh) , +(3.2) +– 5 – + +where +ρR(T) = π2 +30 g⋆(T) T 4, +(3.3) +s(T) = 2π2 +45 g⋆s(T) T 3 +(3.4) +are the SM radiation energy density and the SM entropy density, with g⋆(T) and g⋆s(T) the +numbers of relativistic degrees of freedom [37]. +The evolution of inflaton, SM radiation, and GW energy densities can be tracked using +the Boltzmann equations3 +dρφ +dt + 3 H ρφ = − +� +Γ(0) + Γ(1)� +ρφ , +(3.5) +dρR +dt + 4 H ρR = +Γ(0) ρφ + +� dΓ(1) +dEω +M − Eω +M +ρφ dEω , +(3.6) +dρGW +dt ++ 4 H ρGW = + +� dΓ(1) +dEω +Eω +M ρφ dEω , +(3.7) +where H stands for the Hubble expansion rate given by +H2 = ρφ + ρR + ρGW +3 M2 +P +, +(3.8) +while Γ(0) and Γ(1) are the 2- and 3-body decay widths. +The factors (M − Eω)/M and +Eω/M correspond to the fractions of inflaton energy injected into SM radiation and GWs, +respectively. It follows that +d(ρGW/ρR) +da +≃ +1 +a H +ρφ +ρR +�� dΓ(1) +dEω +Eω +M dEω − ρGW +ρR +Γ(0) +� +. +(3.9) +This expression can be integrated during reheating, that is, for amax ≤ a ≤ arh, corresponding +to photon temperatures Tmax ≥ T ≥ Trh. Importantly, during reheating in which the SM +thermal bath is produced and the universe transitions to radiation domination, the bath +temperature may rise to a value Tmax that exceeds Trh [38]. That the maximum temperature +of the thermal bath may reach Tmax > Trh before cooling is not apparent if one takes the +instantaneous decay approximation for reheating. We note that during reheating +ρφ(a) = ρφ(arh) +�arh +a +�3 +, +(3.10) +T(a) = Trh +�arh +a +�3/8 +, +(3.11) +as the inflaton is assumed to be non-relativistic and to decay with a constant decay width +into SM radiation. We emphasize that the scaling of the SM temperature is due to the fact +that the SM radiation is not free, but is sourced by inflaton decays. The end of the reheating +corresponds to the moment in which the equality ρR(Trh) = ρφ(Trh) is realized. Additionally, +assuming that at the beginning of the reheating, the universe had no SM radiation or GWs, +3We would like to emphasize that our approach of computation of the GW energy density takes care of +the evolution of energy densities beyond the instantaneous approximation as was done in Refs. [14, 15]. +– 6 – + +and taking into account that at the end of the reheating Γ(0) ≃ H(Trh), Eq. (3.9) admits the +analytical solution +ρGW(Trh) +ρR(Trh) ≃ +� M/2 +0 +1 +Γ(0) +dΓ(1) +dEω +Eω +M dEω +� +1 − +� Trh +Tmax +�8/3� +. +(3.12) +We notice that within the approximation of an instantaneous decay of the inflaton, the ex- +pression in the squared brackets reduces to one. +For the different decay channels, in the limit y → 0, one has +ρGW(Trh) +ρR(Trh) ≃ Cρ +M2 +π2M2 +P +� +1 − +� Trh +Tmax +�8/3� +, +(3.13) +where Cρ = 1/96 for scalars, 3/128 for fermions, and 127/1800 for vectors. Therefore, the +corresponding GW contribution to ∆Neff is +∆Neff ≃ C∆Neff +� M +MP +�2 � +1 − +� Trh +Tmax +�8/3� +, +(3.14) +with C∆Neff ≃ 0.01 for scalars, 0.03 for fermions, and 0.08 for vectors, where we have taken +g⋆s(T∆Neff) ≃ 10.75. +Again, note that in the instantaneous reheating approximation, the +square bracket in Eq. (3.14) becomes unity. 
+To avoid jeopardizing the successful predic- +tions from BBN, the reheating temperature must satisfy Trh ≥ TBBN. Furthermore, recent +BICEP/Keck measurements have offered a stronger bound (than that of previous Planck +results [39]) on the tensor-to-scalar ratio r < 0.035 [40], implying Trh ≲ 5.5 × 1015 GeV. +Within the framework of ΛCDM, Planck legacy data produces Neff = 2.99±0.34 at 95% +CL [39]. Once the baryon acoustic oscillation (BAO) data are included, the measurement +becomes more stringent: Neff = 2.99 ± 0.17 at 1σ CL. Upcoming CMB experiments, such as +SPT-3G [47] and the Simons Observatory [48], will soon improve Planck’s precision on Neff. +In particular, CMB-S4 [42] and CMB-HD [43] will be sensitive to a precision of ∆Neff ∼ 0.06 +and ∆Neff ∼ 0.027 at 95% CL, respectively. As calculated in Ref. [41], a combined analysis +from BBN and CMB results in Neff = 2.880±0.144. The next generation of satellite missions, +such as COrE [44] and Euclid [45], shall impose limits at 2σ on ∆Neff ≲ 0.013. Furthermore, +as mentioned in Ref. [46], a hypothetical cosmic-variance-limited (CVL) CMB polarization +experiment could presumably be reduced to as low as ∆Neff ≲ 3 × 10−6, although this does +not seem to be an experimentally plausible scenario. Figure 2 illustrates the constraint from +∆Neff following Eq. (3.14), considering Tmax ≫ Trh. As discussed above, we show the present +and future limits of ∆Neff on the GW energy density for scenarios in which the graviton +decays into a pair of scalars (red dotted line), a pair of Dirac fermions (blue dashed line), or a +pair of massive vector bosons (black solid line). As we can see, the impact of GW production +on ∆Neff through all these channels is very challenging not only for present, but even for the +projected experimental sensitivities, unless M ∼ MP . A large inflaton mass is required to +overcome the strong Planck suppression originating from minimal graviton coupling. Note +that there is a possibility for experiments such as COrE or Euclid to probe the vector scenario. +4 +Gravitational Wave Spectrum +After being produced from inflaton 3-body decays, gravitons would propagate and spread +in the whole universe, forming a homogeneous and isotropic stochastic GW background at +– 7 – + +1015 +1016 +1017 +1018 +M [GeV] +10−6 +10−5 +10−4 +10−3 +10−2 +10−1 +100 +∆Neff +Planck 18 +BBN+CMB +CMB-S4 +CMB-HD +COrE/Euclid +CVL +y → 0 +Tmax ≫ Trh +Vector +Fermion +Scalar +Figure 2. Contribution of GW energy density to ∆Neff (cf. Eq. (3.14) with Tmax ≫ Trh), where the +solid black, dashed blue, and dot-dashed red slanted straight lines correspond to scalar, fermion, and +vector boson final states. We show the present limits from PLANCK [39], CMB+BBN combined [41], +and future limits from CMB-S4 [42], CMB-HD [43], COrE [44]/ Euclid [45], and also hypothetical +CVL experiment [46], from top to bottom. +present, after the attenuation of its energy and amplitude due to cosmic expansion. The +primordial GW spectrum at present ΩGW(f) for a frequency f is defined by +ΩGW(f) = 1 +ρc +dρGW +d ln f = Ω(0) +γ +d(ρGW/ρR) +d ln f += Ω(0) +γ +g⋆(Trh) +g⋆(T0) +� g⋆s(T0) +g⋆s(Trh) +�4/3 d(ρGW(Trh)/ρR(Trh)) +d ln Eω +, +(4.1) +where ρc is the critical energy density, and Ω(0) +γ h2 ≃ 2.47 × 10−5 is the observed photon +abundance [39]. Equation (4.1) must be evaluated at an energy +Eω = 2π f a0 +arh += 2π f Trh +T0 +�g⋆s(Trh) +g⋆s(T0) +�1/3 +, +(4.2) +taking into account the redshift of the GW energy between reheating and the present epoch. +Similarly to Eq. 
(3.12), the differential ratio of GW to SM radiation energy densities at +the end of reheating is +d(ρGW(Trh)/ρR(Trh)) +dEω +≃ dΓ(1) +dEω +Eω +M +1 +Γ(0) +� +1 − +� Trh +Tmax +�8/3� +, +(4.3) +where again, within the approximation of an instantaneous decay of the inflaton, the expres- +sion in the squared brackets reduces to one. For the inflaton decay into particles with different +– 8 – + +10−3 +100 +103 +106 +109 +1012 +1015 +f [Hz] +10−37 +10−35 +10−33 +10−31 +10−29 +10−27 +10−25 +10−23 +10−21 +hc +LISA +BBO +uDECIGO +ET +Res. Cavities +IAXO SPD +IAXO HET +GB +Planck +COrE/Euclid +CVL +y → 0 +Tmax ≫ Trh +Œ + +Vector +Fermion +Scalar +Figure 3. Dimensionless strain hc as a function of the GW frequency f, for the two benchmarks (Œ +and ) described in the text, assuming Tmax ≫ Trh and y → 0. The black solid, blue dashed, and red +dotted curves correspond to decays into vector, fermion, and scalar final states, respectively. Projected +sensitivities from different GW detection experiments are also shown in orange (adapted from Refs. [49, +50]). The gray dashed diagonal lines are CMB bounds on ∆Neff from Planck, COrE/Euclid, and +hypothetical CVL experiment, respectively. +spins, one has +ΩGW(f) ≃ CΩGW +Trh +5.5 × 1015 +M +MP +f +1012 Hz , +(4.4) +with CΩGW ≃ 1.4 × 10−8 for scalars, CΩGW ≃ 2.8 × 10−8 for fermions, and CΩGW ≃ 11.2 × 10−8 +for vectors. The latter expression is valid for frequencies f smaller than +f ≲ M +4π +T0 +Trh +� g⋆s(T0) +g⋆s(Trh) +�1/3 +≃ 4.1 × 1012 +� M +MP +� �5.5 × 1015 GeV +Trh +� +Hz , +(4.5) +where we have used g⋆s(T0) = 3.94 and g⋆s(Trh) = 106.75. +Finally, we compute the dimensionless strain defined as [51] +hc(f) = 1 +f +� +3 H2 +0 ΩGW(f) +2π2 += 1.26 × 10−18 +�Hz +f +� � +h2 ΩGW(f) , +(4.6) +where H0 ≡ H(T0) ≃ 1.44 × 10−42 GeV is the present-day Hubble parameter, and h = +0.674 [39]. In Fig. 3 we show the dimensionless strain hc as a function of the GWs frequency +– 9 – + +f, for two benchmark points: Œ M = MP /10 and Trh = 5.5×1015 GeV, and  M = MP /103 +and Trh = MP /(2 × 104). In the same plane, we project the limits from several proposed +GW detectors, for example, LISA [52], the Einstein Telescope (ET) [53–56], the Big Bang +Observer (BBO) [57–59], ultimate DECIGO (uDECIGO) [24, 25], GW-electromagnetic wave +conversion in the vacuum (solid) and in a Gaussian beam (GB) (dotted) [49, 60], resonant +cavities [22, 23], and the International Axion Observatory (IAXO) [61, 62]. We have projected +the ∆Neff bounds from Planck, COrE/Euclid and CVL from Fig. 2 as a bound on the GW +strain using [51] +� +d (ln f) ΩGW(f) h2 ≤ 5.6 × 10−6 ∆Neff . +(4.7) +These are shown by the diagonal straight gray lines. As one can infer from Eq. (4.5), a larger +M/Trh ratio corresponds to a higher frequency, which is also reflected in Fig. 3 curves. As we +see, only microwave cavity detectors are capable of probing the high-frequency regime of the +GW spectrum. Detectors such as uDECIGO, on the other hand, might be able to reach the +lower frequency part of the spectrum. +5 +Conclusions +Inflaton 3-body decays is a source of the stochastic gravitational wave (GW) background, due +to the inexorable graviton Bremsstrahlung. In this work, we have revisited such three-body +decay rates, considering the perturbative coupling of the inflaton with a pair of massive spin-0 +bosons, spin-1/2 fermions, and spin-1 vector bosons, along with the radiative emission of a +massless graviton from either the initial or the final states. 
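Before moving on, the approximate expressions of the previous section are easy to evaluate. The following Python sketch (our own rough evaluation; the prefactors 1.26e-18 and 4.1e12, the CΩGW values and h = 0.674 are taken from Eqs. (4.4)-(4.6) and the surrounding text, and the first benchmark of Fig. 3 with M = MP/10, Trh = 5.5 x 10^15 GeV, Tmax >> Trh and y -> 0 is assumed) gives the order of magnitude of the strain near the cutoff frequency of Eq. (4.5).

import numpy as np

h = 0.674
C_Omega = {"scalar": 1.4e-8, "fermion": 2.8e-8, "vector": 11.2e-8}   # Eq. (4.4)

M_over_MP = 0.1                              # first benchmark of Fig. 3
Trh = 5.5e15                                 # GeV
f_max = 4.1e12 * M_over_MP * (5.5e15 / Trh)  # Hz, cutoff frequency of Eq. (4.5)

def h_c(f, channel):
    """Strain of Eq. (4.6) with Omega_GW from Eq. (4.4); valid only for f below f_max."""
    Omega_GW = C_Omega[channel] * (Trh / 5.5e15) * M_over_MP * (f / 1e12)
    return 1.26e-18 / f * np.sqrt(h**2 * Omega_GW)

for channel in C_Omega:
    print(f"{channel:7s}  h_c ~ {h_c(f_max, channel):.1e}  at f ~ {f_max:.1e} Hz")

For this benchmark the strain comes out of order 10^-34 at f ~ 4 x 10^11 Hz, far below laser-interferometer sensitivities but in the frequency range targeted by the resonant-cavity proposals discussed above.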
We have found that the previously +reported results show discrepancies with our findings in all three cases. To make our claim +more robust, we employed two distinct procedures in calculating the graviton polarization +and found that they agree with each other. +With this improvement over existing results, we then numerically calculated the contri- +bution of the GW energy density to the number of degrees of freedom around the time of BBN +and CMB, typically encoded in ∆Neff. We have taken care of the evolution of the energy +densities of inflaton, radiation, and GW by solving a set of coupled Boltzmann equations. +Due to Planck-scale suppression from minimal gravitational coupling, the GW energy density +from inflaton Bremsstrahlung stays well below the CMB bounds on ∆Neff, regardless of the +spin of the final-state particles. As the spectrum of GW peaks in the GHz to THz ballpark, +this primordial GW signature remains beyond the reach of most detector facilities; however, +it may leave a footprint in resonant cavity detectors or even in upcoming space-based GW +detectors. +Acknowledgments +The authors thank Manuel Drees, Yann Mambrini, and Simon Cléry for useful discussions, +Rome Samanta for providing the experimental limits, and also Yong Tang and Da Huang for +helpful communication. NB received funding from the Spanish FEDER / MCIU-AEI under +the grant FPA2017-84543-P. OZ has received funding from the Ministerio de Ciencia, Tec- +nología e Innovación (MinCiencias - Colombia) through Grants 82315-2021-1080 and 80740- +492-2021, and has been partially supported by Sostenibilidad-UdeA and the UdeA/CODI +Grant 2020-33177. +– 10 – + +A +Feynman Rules for Relevant Vertices +Here, we focus on a massless spin-2 graviton field, whose polarization tensor ϵµν has to satisfy +the following conditions [63, 64] +ϵi µν = ϵi νµ +symmetric, +(A.1) +ωµ ϵi µν = 0 +transverse, +(A.2) +ηµν ϵi µν = 0 +traceless, +(A.3) +ϵi µν ϵj ⋆ +µν = δij +orthonormal, +(A.4) +for i, j = 1, 2 being the polarization indices and ω the graviton four momentum. +The +polarization sum for the massless graviton is [65] +� +pol +ϵ⋆µνϵαβ = 1 +2 +� +ˆηµαˆηνβ + ˆηµβ ˆηνα − ˆηµν ˆηαβ� +, +(A.5) +with +ˆηµν ≡ ηµν − ωµ¯ων + ¯ωµων +ω · ¯ω +, +(A.6) +where ω = (Eω, ⃗ω) and ¯ω = (Eω, −⃗ω). For a massless graviton, we have ω·¯ω = E2 +ω+⃗ω2 = 2E2 +ω. +The polarization sum in Eq. (A.5) indeed preserves the symmetric, transverse, traceless, and +orthonormal conditions.4 Note that due to the van Dam-Veltman discontinuity [65, 66], one +cannot obtain the massless graviton propagator from the massive one simply by taking the +limit of the graviton mass mhµν → 0. +From the interaction Lagrangian in Eq. (2.2) the relevant Feynman rules can be ex- +tracted, and we tabulate them in Fig. 4. +B +Calculation of the Decay Widths +In this section, we present details of the computation of the differential rates for the inflaton +decay into three-body final states, including a graviton. +B.1 +Scalar Case +To calculate the differential decay rate and cross-check the results, we present two different +strategies based on: i) an explicit construction for the graviton polarization tensor and ii) +polarization sum for the massless graviton. +B.1.1 +Polarization Tensor in Explicit Form +Without losing generality, we choose a coordinate system in which the graviton moves along +the x direction, and hence the four-momentum of the graviton is ω = (Eω, ωx, 0, 0), where +ω2 = 0, and then ωx = Eω. 
The four-momentum of the inflaton and its other two decay +products are l = (M, 0, 0, 0), p = (Ep, px, py, pz), and q = (M −Ep −Eω, −px −ωx, −py, −pz), +respectively. +4We note that the naive polarization sum � +pol ϵ⋆µνϵαβ = 1 +2 +� +ηµαηνβ + ηµβηνα − ηµνηαβ� +violates trans- +verse, traceless, and orthonormal conditions and therefore should not be used. +– 11 – + +Figure 4. Relevant graviton-matter vertices for scalar (ϕ), fermion (ψ) and vector boson (V ), from +top to bottom, following Ref. [26]. +The two polarization tensors that meet traceless, transverse, symmetric, and orthonor- +mal conditions described in Eqs. (A.1) to (A.4) can be explicitly written as [66] +ϵ1 +µν = +1 +√ +2 +� +��� +0 0 0 0 +0 0 0 0 +0 0 1 0 +0 0 0 −1 +� +��� +and +ϵ2 +µν = +1 +√ +2 +� +��� +0 0 0 0 +0 0 0 0 +0 0 0 1 +0 0 1 0 +� +��� . +(B.1) +– 12 – + +P1μP2v + p1vp2μ- nμv (p1· P2 -m²) +P2 +p1 +h +In +(-dg+ d)a- (d+ Id) + (d+ Id) +p2 +山 +hΛK +[naxμ(P1.P2 -m2) - nxP1μP2 + kμP1vP2 -NμP1xP2 +Mp ++p1xp2-Nkμ (p1 P2 -m2) - Nμp1p2 + μp1vp2k +-N vaμ (p1. P2 - m2) l +P2Using the Feynman rules of Fig. 4, the amplitudes for the 3-body decays shown in Fig. 1 are +iM1 = −i µ +MP +lµ lν ϵ⋆µν +i +M Eω += 0 , +(B.2) +iM2 = i µ +MP +pµ pν ϵ⋆µν +j +p · ω +, +(B.3) +iM3 = i µ +MP +qµ qν ϵ⋆µν +k +M Eω − p · ω , +(B.4) +iM4 ∝ ηµνϵµν = 0 , +(B.5) +where M1,2 corresponds to the diagrams in the upper left and upper right panels of Fig. 1, +while M3,4 correspond to the lower left and lower right panels, respectively. Using Eq. (B.1), +we notice that M1 = 0 as the decay takes place in the rest frame of the inflaton with +l = (M, 0, 0, 0), while +� +pol +|M2|2 = +µ2 +(p · ω)2M2 +P +� +j +(pµpνϵ⋆µν +j +)(pµpνϵµν +j ) = +µ2 +2(p · ω)2M2 +P +� +(p2 +y − p2 +z)2 + 4p2 +yp2 +z +� +. +(B.6) +Similarly, +� +pol +|M3|2 = +µ2 +2(M Eω − p · ω)2 M2 +P +� +((−py)2 − (−pz)2)2 + 4p2 +yp2 +z +� += +µ2 +2(M Eω − p · ω)2 M2 +P +� +(p2 +y − p2 +z)2 + 4p2 +yp2 +z +� +, +(B.7) +and the cross-term turns out to be +� +pol +(M2M⋆ +3) = +µ2 +2(M Eω − p · ω)(p · ω)M2 +P +�� +p2 +y − p2 +z +�2 + 4 p2 +y p2 +z +� +. +(B.8) +Note that � +pol (M1M⋆ +2) = 0 and � +pol (M1M⋆ +3) = 0 as M1 = 0. +The total squared +amplitude is then +� +pol +|M|2 = +µ2 +2M2 +P +� +1 +p · ω + +1 +M Eω − p · ω +�2 � +p2 +y + p2 +z +�2 . +(B.9) +Since p · ω = EpEω − pxEω, we have +px = EpEω − p · ω +Eω +, +(B.10) +which together with m2 ≡ p2 = E2 +p − (p2 +x + p2 +y + p2 +z) implies +� +pol +|M|2 = +µ2 +2M2 +P +� +1 +p · ω + +1 +M Eω − p · ω +�2 � +2 Ep +Eω +p · ω − +�p · ω +Eω +�2 +− m2 +�2 += µ2 � +4E2 +ωm2 − 8EpEωM(Eω + Ep) + 4M2(E2 +p + 3EpEω) + E2 +ω − 4(Eω + Ep)M3 + M4) +� +2 M2 +P E2ω M2 (M − 2 Ep)2 [M − 2 (Ep − Eω)]2 +. +(B.11) +– 13 – + +Finally, utilizing +dΓ +dEω += +1 +(2π)3 +1 +8 M +� Ep,max +Ep,min +dEp |M|2, +(B.12) +with +Ep,max = 1 +2 +� +M − Eω + Eω +� +M2 − 2MEω − 4m2 +M(M − 2Eω) +� +, +(B.13) +Ep,min = 1 +2 +� +M − Eω − Eω +� +M2 − 2MEω − 4m2 +M(M − 2Eω) +� +, +(B.14) +one has the total differential cross-section as +dΓ(1) +0 +dEω += +2 +64 π3 +� µ +MP +�2 �(2x − 1) (2x − 2y2 − 1) +4x α−1 ++ y2(y2 + 2x − 1) +x +log +�1 + α +1 − α +�� +(B.15) +for inflaton decays to complex scalars. Note that the extra factor 2 comes from two possible +decay channels of the inflaton. +B.1.2 +Polarization Sum +We now employ the second formalism of the calculation, namely the tensor polarization sum +formalism, as mentioned in Eq. (A.5). 
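As a side check, not part of the original derivation, the explicit tensors of Eq. (B.1) can be verified numerically against the conditions (A.1)-(A.4) and the polarization sum of Eqs. (A.5)-(A.6), in the frame where the graviton moves along x. The following Python/NumPy sketch treats all tensors as plain 4 x 4 component arrays in this frame.

import numpy as np

E = 1.0                                   # graviton energy, arbitrary units
eta = np.diag([1.0, -1.0, -1.0, -1.0])    # Minkowski metric, signature (+,-,-,-)
w    = np.array([E,  E, 0.0, 0.0])        # graviton momentum omega
wbar = np.array([E, -E, 0.0, 0.0])        # reflected momentum omega-bar

# hat-eta of Eq. (A.6); note w @ eta @ wbar = 2 E^2 as stated below Eq. (A.6)
eta_hat = eta - (np.outer(w, wbar) + np.outer(wbar, w)) / (w @ eta @ wbar)

# explicit polarization tensors of Eq. (B.1)
eps1 = np.zeros((4, 4)); eps1[2, 2] = 1/np.sqrt(2); eps1[3, 3] = -1/np.sqrt(2)
eps2 = np.zeros((4, 4)); eps2[2, 3] = eps2[3, 2] = 1/np.sqrt(2)

for eps in (eps1, eps2):
    assert np.allclose(eps, eps.T)                            # symmetric, Eq. (A.1)
    assert np.allclose(np.einsum('m,mn->n', w, eps), 0.0)     # transverse, Eq. (A.2)
    assert np.isclose(np.einsum('mn,mn->', eta, eps), 0.0)    # traceless, Eq. (A.3)
    assert np.isclose(np.einsum('mn,mn->', eps, eps), 1.0)    # normalized, Eq. (A.4)
assert np.isclose(np.einsum('mn,mn->', eps1, eps2), 0.0)      # orthogonal, Eq. (A.4)

# explicit sum over the two polarizations vs. the closed form of Eq. (A.5)
lhs = np.einsum('mn,ab->mnab', eps1, eps1) + np.einsum('mn,ab->mnab', eps2, eps2)
rhs = 0.5 * (np.einsum('ma,nb->mnab', eta_hat, eta_hat)
             + np.einsum('mb,na->mnab', eta_hat, eta_hat)
             - np.einsum('mn,ab->mnab', eta_hat, eta_hat))
print("polarization sum matches Eq. (A.5):", np.allclose(lhs, rhs))

All conditions hold and the component-wise sum agrees with Eq. (A.5), which is the statement underlying the agreement between the two procedures used in this appendix.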
The squared amplitudes are +� +pol +|M1|2 = +µ2 +M2E2ω M2 +P +lµlνlαlβ +� +pol +ϵ⋆µνϵαβ += +µ2 +2M2E2ω M2 +P +lµlνlαlβ +� +ˆηµαˆηνβ + ˆηµβ ˆηνα − ˆηµν ˆηαβ� += +µ2 +2M2E2ω M2 +P +� +l2l2 − 4l2 (l · ω)(l · ¯ω) +2E2ω ++ 4(l · ω)2(l · ¯ω)2 +4E4ω +� += +µ2 +2 M2E2ω M2 +P +� +l2 − (l · ω)(l · ¯ω) +E2ω +�2 += +µ2 +2M2E2ω M2 +P +� +M2 − M2� += 0 , +(B.16) +� +pol +|M2|2 = µ2 +M2 +P +pµ pν pα pβ +(p · ω)2 +� +pol +ϵ⋆µνϵαβ = +µ2 +2(p · ω)2 M2 +P +� +p2 − (p · ω)(p · ¯ω) +E2ω +�2 += +µ2 +2(p · ω)2 M2 +P +� +m2 − (p · ω)(p · ¯ω) +E2ω +�2 +, +(B.17) +� +pol +|M3|2 = +µ2 +2(M Eω − p · ω)2 M2 +P +� +q2 − (q · ω)(q · ¯ω) +E2ω +�2 += +µ2 +2(M Eω − p · ω)2 M2 +P +� +m2 − (q · ω)(q · ¯ω) +E2ω +�2 +, +(B.18) +– 14 – + +� +pol +(M2M⋆ +3) = +µ2 +(p · ω)(M Eω − p · ω) M2 +P +pµpνqαqβ +� +pol +ϵ⋆µνϵαβ += +µ2 +2(p · ω)(M Eω − p · ω)M2 +P +� +2 +� +(p · q) − (p · ω)(q · ¯ω) + (p · ¯ω)(q · ω) +2E2ω +�2 +− +� +p2q2 − p2(q · ω)(q · ¯ω) + q2(p · ω)(p · ¯ω) +E2ω ++ (p · ω)(p · ¯ω)(q · ω)(q · ¯ω) +E4ω +� � += +µ2 +2(p · ω)(M Eω − p · ω)M2 +P +� +2 +� +(p · q) − (p · ω)(q · ¯ω) + (p · ¯ω)(q · ω) +2E2ω +�2 +− +� +p2 − (q · ω)(q · ¯ω) +E2ω +� � +q2 − (p · ω)(p · ¯ω) +E2ω +� � +. +(B.19) +Note that Eq. (B.16) implies M1 = 0, which agrees with Eq. (B.2). Therefore, the other two +interference terms � +pol (M1M⋆ +2) = � +pol (M1M⋆ +3) = 0. Using the four vectors, one ends up +with +p · ω = M +� +Eω + Ep − 1 +2M +� +, +(B.20) +p · ¯ω = 2EpEω − p · ω , +(B.21) +q · ω = MEω − p · ω , +(B.22) +q · ¯ω = (l − p − ω)¯ω = MEω − p · ¯ω − 2E2 +ω = MEω − 2EpEω + p · ω − 2E2 +ω , +(B.23) +p · q = 1 +2 +� +(p + q)2 − 2m2� += 1 +2 +� +(l − ω)2 − 2m2� += 1 +2 +� +M2 − 2MEω − 2m2� +. +(B.24) +With these relations, one obtains +� +pol +|M|2 += µ2 � +4E2 +ωm2 − 8EpEωM(Eω + Ep) + 4M2(E2 +p + 3EpEω) + E2 +ω − 4(Eω + Ep)M3 + M4) +� +2M2 +P E2ωM2(M − 2Ep)2[M − 2(Ep − Eω)]2 +, +(B.25) +and further +dΓ(1) +0 +dEω += +µ2 +32π3M2 +P +�(2x − 1) (2x − 2y2 − 1) +4x α−1 ++ y2(y2 + 2x − 1) +x +log +�1 + α +1 − α +�� +, +(B.26) +which agrees with Eq. (B.15). +B.2 +Fermionic Case +Using the list of Feynman rules in Fig. 4, the amplitudes for the inflaton decay into a Dirac +fermion turn out to be +iM1 = −i yψ +MP +lµ lν ϵ⋆µν +M Eω +¯u(p)v(q) , +(B.27) +iM2 = +iyψ +2p · ωMP +� +¯u(p)(pµγν)(/l + 2m)v(q) +� +ϵ∗µν, +(B.28) +iM3 = +iyψ +2(MEω − p · ω)MP +� +¯u(p)(/l − 2m))(qµγν)v(q) +� +ϵ∗µν, +(B.29) +iM4 ∝ ηµνϵ∗µν = 0 , +(B.30) +– 15 – + +Figure 5. Feynman graph for inflaton decay into a pair of Dirac fermions with graviton emission. +where M1, M2, M3 and M4 corresponds to the diagrams from left to right in Fig. 5. Note +that +� +spin, pol +|M1|2 = +y2 +ψ +M2E2ω M2 +P +lµlνlαlβ +� +pol +ϵ⋆µνϵαβ × Tr +� +(/q − m)(/p + m) +� += (B.16) × +y2 +ψ +µ2 × Tr +� +(/q − m)(/p + m) +� += 0 , +(B.31) +which again implies that M1 = 0. In fact, since M1 ∝ Eq. (B.2), one immediately finds that +it vanishes. The other squared amplitudes are given by: +� +spin, pol +|M2|2 = +y2 +ψ +(2p · ω)2 M2 +P +� +pol +ϵ⋆µνϵαβ +× Tr +� +pµγν(/l + 2m)(/q − m)(/l + 2m)γαpβ(/p + m) +� +, +(B.32) +� +spin, pol +|M3|2 = +y2 +ψ +4(MEω − p · ω)2 M2 +P +� +pol +ϵ⋆µνϵαβ +× Tr +� +(/l − 2m)qµγν(/q − m)γαqβ(/l − 2m)(/p + m) +� +, +(B.33) +� +spin, pol +(M2M∗ +3) = +y2 +ψ +4p · ω(MEω − p · ω) M2 +P +� +pol +ϵ⋆µνϵαβ +× Tr +� +pµγν(/l + 2m)(/q − m)γαqβ(/l − 2m)(/p + m) +� +. +(B.34) +Similarly as before, the other interference terms � +pol (M1M⋆ +2) = � +pol (M1M⋆ +3) = 0. 
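The kinematic relations (B.20)-(B.24) used above can also be verified symbolically. The following SymPy sketch (our own cross-check, not part of the derivation) works in the frame of Appendix B.1.1, fixes px by imposing q^2 = p^2, and confirms each of them.

import sympy as sp

M, Ew, Ep, py, pz = sp.symbols('M E_w E_p p_y p_z', positive=True)
px = sp.symbols('p_x')

def dot(a, b):
    return a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]

l  = (M, 0, 0, 0)                      # inflaton at rest
w  = (Ew, Ew, 0, 0)                    # graviton along x
wb = (Ew, -Ew, 0, 0)                   # reflected momentum omega-bar
p  = (Ep, px, py, pz)
q  = tuple(l[i] - p[i] - w[i] for i in range(4))

m2 = dot(p, p)                                     # p^2, identified with m^2
px_sol = sp.solve(sp.Eq(dot(q, q), m2), px)[0]     # q^2 = p^2 fixes p_x
s = {px: px_sol}

checks = {
    "(B.20)": dot(p, w).subs(s) - M*(Ew + Ep - M/2),
    "(B.21)": dot(p, wb).subs(s) - (2*Ep*Ew - dot(p, w).subs(s)),
    "(B.22)": dot(q, w).subs(s) - (M*Ew - dot(p, w).subs(s)),
    "(B.23)": dot(q, wb).subs(s) - (M*Ew - 2*Ep*Ew + dot(p, w).subs(s) - 2*Ew**2),
    "(B.24)": dot(p, q).subs(s) - (M**2 - 2*M*Ew - 2*m2.subs(s))/2,
}
print({label: sp.simplify(expr) == 0 for label, expr in checks.items()})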
The +total matrix element squared turns out to be +� +spin, pol +|M|2 = y2 +ψ +M(M − 2Ep)(M − 2Eω)(2(Ep + Eω) − M) + 4E2 +ωm2 +E2ωM2(M − 2E2p)(M − 2(Ep + Eω))2M2 +P +× +� +4Mm2 � +M(4E2 +p + 12EpEω + 3E2 +ω) − 4M2(Ep + Eω) − 8EpEω(Ep + Eω) + M3� +− M2(M − 2Ep)(2E2 +ω − 2EωM + M2) [M − 2(Ep + Eω)] + 16E2 +ωm4� +, +(B.35) +– 16 – + +山 +山 +w) +p +m +t +w +b +山 +山 +山Figure 6. Feynman diagrams for inflaton decay into a pair of massive vectors with graviton emission. +with which we find +dΓ(1) +1/2 +dEω += +y2 +ψM2 +64 π3 M2 +P +� +(1 − 2x) +xα−1 +� +8xy2 + 2(x − 1)x − 8y4 − 2y2 + 1 +� ++ 4y2 � +(5 − 8x)y2 − (x − 1)2 − 4y4� +x +log +�1 + α +1 − α +� � +. +(B.36) +B.3 +Vector Case +For the amplitudes of inflaton decay into massive spin-1 final states, we obtain +iM1 = −i 2 gV +MP +lµ lν ϵ⋆µν +M Eω +ηµ′ν′ε∗µ′(p, λ)ε∗ν′(q, λ) , +(B.37) +and similarly, we find +iM2 = −i +gV +p · ω MP +ϵ∗µνε∗µ′(p, λ)ε∗ν′(q, λ) · ησν′ +� +ηρσ − (p′ρp′σ) +m2 +� +× +� +ηµνηρµ′ � +p′ · p − m2� +− ηµνp′ +µ′pρ + ηνρp′ +µ′pµ − ηρµ′p′ +νpµ + ηµµ′p′ +νpρ +− ηνρηµµ′ � +p′ · p − m2� ++ ηνµ′p′ +µpρ − ηρµ′p′ +µpν + ηµρp′ +µ′pν −ηνµ′ηµρ +� +p′ · p − m2�� +, +(B.38) +iM3 = −i +gV +(MEω − p · ω) MP +ϵ∗µνε∗µ′(p, λ)ε∗ν′(q, λ) · ησµ′ +� +ηρσ − (q′ρq′σ) +m2 +� +× +� +ηµνηρν′ � +q′ · q − m2� +− ηµνq′ +ν′qρ + ηνρq′ +ν′qµ − ηρν′q′ +νqµ + ηµν′q′ +νqρ +− ηνρηµν′ � +q′ · q − m2� ++ ηνν′q′ +µqρ − ηρν′q′ +µqν + ηµρq′ +ν′qν −ηνν′ηµρ +� +q′ · q − m2�� +, +(B.39) +while +M4 ∝ ηµνϵ∗µν = 0 , +(B.40) +for left to right in Fig. 6, respectively. The total squared matrix element is given by +� +spin, pol +|M|2 = +g2 +V +2E4ω M2 m4(M − 2Ep)2(M − 2Ep − 2Eω)2M2 +P +× +8 +� +k=0 +Ak Ek +p , +(B.41) +– 17 – + +huv +Vμ +Vμ +p +p +pa +m +.....> +- +. +t +huv +tr +...... +...... +b +h +b +b +9 +Vv +Vv +Vy +Vvwhere +A8 = 256M4(M − 2Eω)2, +(B.42) +A7 = 1024(Eω − M)M4(M − 2Eω)2, +(B.43) +A6 = 128(2Eω − M)M3� +− 2Eω(M − 4Eω)m2 − M(M − 2Eω)(12E2 +ω − 27MEω + 14M2) +� +, +(B.44) +A5 = 128M3(2E2 +ω − 3MEω + M2) +× +� +− 6Eω(M − 4Eω)m2 − M(M − 2Eω)(8E2 +ω − 25MEω + 14M2) +� +, +(B.45) +A4 = 16M3� +4E2 +ω(3M − 4Eω)m4 ++ 4Eω(M − 2Eω)(−60E3 +ω + 138ME2 +ω − 91M2Eω + 15M3) m2 ++ M(M − 2Eω)2(16E4 +ω − 136ME3 +ω + 301M2E2 +ω − 250M3Eω + 70M4) +� +, +(B.46) +A3 = −32(Eω − M)M3� +4E2 +ω(4Eω − 3M)m4 − 4Eω(2Eω − M) +× (20E3 +ω − 48ME2 +ω + 31M2Eω − 5M3)m2 + M2 (M − 2Eω)2 +× (12E3 +ω − 45ME2 +ω + 46M2Eω − 14M3) +� +, +(B.47) +A2 = 8M +� +M5(M − 2Eω)(M − Eω)(−26E3 +ω + 68ME2 +ω − 55M2Eω + 14M3) ++ 2EωM2(M − 2Eω)(−56E5 +ω + 264ME4 +ω − 458M2E3 +ω + 360M3E2 +ω − 126M4Eω + 15M5)m2 ++ 16E4 +ω(3M − 4Eω)m6 + 4E2 +ωM(−24E4 +ω + 4ME3 +ω + 26M2E2 +ω − 30M3Eω + 9M4)m4� +, +(B.48) +A1 = 8(Eω − M)M +� +(2M − 3Eω)(Eω − M)(2Eω − M)3M6 ++ 2Eω(2Eω − 3M)(M − 2Eω)2(2E3 +ω − 8ME2 +ω + 6M2Eω − M3)m2M2 ++ 4E2 +ω(−24E4 +ω + 12ME3 +ω + 4M2E2 +ω − 10M3Eω + 3M4)m4M + 16E4 +ω(3M − 4Eω)m6� +, +(B.49) +A0 = 192 E6 +ω m8 + 32 E4 +ω M2(6 E2 +ω − 10 M Eω + 3 M2) m6 + (Eω − M)2 M8 (M − 2Eω)4 +− 4 E2 +ω M2 (2 E2 +ω − 2 M Eω + M2)(16 E4 +ω − 32 M E3 +ω + 6 M2 E2 +ω + 10 M3 Eω − 3 M4) m4 +− 4 Eω M4 (M − 2Eω)4 (2 E3 +ω − 4 ME2 +ω + 5 M2 Eω − M3) m2, +(B.50) +with which one can show that the differential decay rate reads +dΓ(1) +1 +dEω += +g2 +V +1920 π3 x y4 M2 +P +� +60y2 � +−(1 − 2x)2(1 + 2x) + (5 + 4x(−3 + 4x))y2 + 16(x − 1)y4 + 12y6� +log +�1 + α +1 − α +� ++ α +� +360(1 − 2x)y6 + 4(4x(23x − 5) + 15)y4 + 2(2x − 1)(28x(14x − 5) + 15)y2 ++ (1 − 2x)2(4x(2x − 5) + 15) +�� +. +(B.51) +– 18 – + +References +[1] A.A. 
Starobinsky, Spectrum of relict gravitational radiation and the early state of the universe, +JETP Lett. 30 (1979) 682. +[2] B. Allen, The Stochastic Gravity Wave Background in Inflationary Universe Models, Phys. Rev. +D 37 (1988) 2078. +[3] V. Sahni, The Energy Density of Relic Gravity Waves From Inflation, Phys. Rev. D 42 (1990) +453. +[4] M.S. Turner, M.J. White and J.E. Lidsey, Tensor perturbations in inflationary models as a +probe of cosmology, Phys. Rev. D 48 (1993) 4613 [astro-ph/9306029]. +[5] S.Y. Khlebnikov and I.I. Tkachev, Relic gravitational waves produced after preheating, Phys. +Rev. D 56 (1997) 653 [hep-ph/9701423]. +[6] R. Easther and E.A. Lim, Stochastic gravitational wave production after inflation, JCAP 04 +(2006) 010 [astro-ph/0601617]. +[7] J.F. Dufaux, A. Bergman, G.N. Felder, L. Kofman and J.-P. Uzan, Theory and Numerics of +Gravitational Waves from Preheating after Inflation, Phys. Rev. D 76 (2007) 123517 +[0707.0875]. +[8] L. Bethke, D.G. Figueroa and A. Rajantie, Anisotropies in the Gravitational Wave Background +from Preheating, Phys. Rev. Lett. 111 (2013) 011301 [1304.2657]. +[9] D.G. Figueroa and F. Torrenti, Gravitational wave production from preheating: parameter +dependence, JCAP 10 (2017) 057 [1707.04533]. +[10] A. Vilenkin, Gravitational radiation from cosmic strings, Phys. Lett. B 107 (1981) 47. +[11] Y. Cui, M. Lewicki, D.E. Morrissey and J.D. Wells, Cosmic Archaeology with Gravitational +Waves from Cosmic Strings, Phys. Rev. D 97 (2018) 123505 [1711.03104]. +[12] Y. Cui, M. Lewicki, D.E. Morrissey and J.D. Wells, Probing the pre-BBN universe with +gravitational waves from cosmic strings, JHEP 01 (2019) 081 [1808.08968]. +[13] C.-F. Chang and Y. Cui, Gravitational waves from global cosmic strings and cosmic +archaeology, JHEP 03 (2022) 114 [2106.09746]. +[14] K. Nakayama and Y. Tang, Stochastic Gravitational Waves from Particle Origin, Phys. Lett. B +788 (2019) 341 [1810.04975]. +[15] D. Huang and L. Yin, Stochastic Gravitational Waves from Inflaton Decays, Phys. Rev. D 100 +(2019) 043538 [1905.08510]. +[16] Y. Mambrini, K.A. Olive and J. Zheng, Post-inflationary dark matter bremsstrahlung, JCAP +10 (2022) 055 [2208.05859]. +[17] S. Weinberg, Infrared photons and gravitons, Phys. Rev. 140 (1965) B516. +[18] J.F. Dufaux, G.N. Felder, L. Kofman, M. Peloso and D. Podolsky, Preheating with trilinear +interactions: Tachyonic resonance, JCAP 07 (2006) 006 [hep-ph/0602144]. +[19] A. Ghoshal, R. Samanta and G. White, Bremsstrahlung High-frequency Gravitational Wave +Signatures of High-scale Non-thermal Leptogenesis, 2211.10433. +[20] L. Pagano, L. Salvati and A. Melchiorri, New constraints on primordial gravitational waves +from Planck 2015, Phys. Lett. B 760 (2016) 823 [1508.02393]. +[21] C. Caprini and D.G. Figueroa, Cosmological Backgrounds of Gravitational Waves, Class. +Quant. Grav. 35 (2018) 163001 [1801.04268]. +– 19 – + +[22] A. Berlin, D. Blas, R. Tito D’Agnolo, S.A.R. Ellis, R. Harnik, Y. Kahn et al., Detecting +high-frequency gravitational waves with microwave cavities, Phys. Rev. D 105 (2022) 116011 +[2112.11465]. +[23] N. Herman, L. Lehoucq and A. Fúzfa, Electromagnetic Antennas for the Resonant Detection of +the Stochastic Gravitational Wave Background, 2203.15668. +[24] N. Seto, S. Kawamura and T. Nakamura, Possibility of direct measurement of the acceleration +of the universe using 0.1-Hz band laser interferometer gravitational wave antenna in space, +Phys. Rev. Lett. 87 (2001) 221103 [astro-ph/0108011]. +[25] H. Kudoh, A. Taruya, T. 
Hiramatsu and Y. Himemoto, Detecting a gravitational-wave +background with next-generation space interferometers, Phys. Rev. D 73 (2006) 064006 +[gr-qc/0511145]. +[26] S.Y. Choi, J.S. Shim and H.S. Song, Factorization and polarization in linearized gravity, Phys. +Rev. D 51 (1995) 2751 [hep-th/9411092]. +[27] S. Sarkar, Big bang nucleosynthesis and physics beyond the standard model, Rept. Prog. Phys. +59 (1996) 1493 [hep-ph/9602260]. +[28] M. Kawasaki, K. Kohri and N. Sugiyama, MeV scale reheating temperature and thermalization +of neutrino background, Phys. Rev. D 62 (2000) 023506 [astro-ph/0002127]. +[29] S. Hannestad, What is the lowest possible reheating temperature?, Phys. Rev. D 70 (2004) +043506 [astro-ph/0403291]. +[30] F. De Bernardis, L. Pagano and A. Melchiorri, New constraints on the reheating temperature of +the universe after WMAP-5, Astropart. Phys. 30 (2008) 192. +[31] P.F. de Salas, M. Lattanzi, G. Mangano, G. Miele, S. Pastor and O. Pisanti, Bounds on very +low reheating scenarios after Planck, Phys. Rev. D 92 (2015) 123534 [1511.00672]. +[32] S. Dodelson and M.S. Turner, Nonequilibrium neutrino statistical mechanics in the expanding +universe, Phys. Rev. D 46 (1992) 3372. +[33] S. Hannestad and J. Madsen, Neutrino decoupling in the early universe, Phys. Rev. D 52 +(1995) 1764 [astro-ph/9506015]. +[34] A.D. Dolgov, S.H. Hansen and D.V. Semikoz, Nonequilibrium corrections to the spectra of +massless neutrinos in the early universe, Nucl. Phys. B 503 (1997) 426 [hep-ph/9703315]. +[35] G. Mangano, G. Miele, S. Pastor, T. Pinto, O. Pisanti and P.D. Serpico, Relic neutrino +decoupling including flavor oscillations, Nucl. Phys. B 729 (2005) 221 [hep-ph/0506164]. +[36] P.F. de Salas and S. Pastor, Relic neutrino decoupling with flavour oscillations revisited, JCAP +07 (2016) 051 [1606.06986]. +[37] M. Drees, F. Hajkarim and E.R. Schmitz, The Effects of QCD Equation of State on the Relic +Density of WIMP Dark Matter, JCAP 06 (2015) 025 [1503.03513]. +[38] G.F. Giudice, E.W. Kolb and A. Riotto, Largest temperature of the radiation era and its +cosmological implications, Phys. Rev. D 64 (2001) 023508 [hep-ph/0005123]. +[39] Planck collaboration, Planck 2018 results. VI. Cosmological parameters, Astron. Astrophys. +641 (2020) A6 [1807.06209]. +[40] BICEP, Keck collaboration, Improved Constraints on Primordial Gravitational Waves using +Planck, WMAP, and BICEP/Keck Observations through the 2018 Observing Season, Phys. +Rev. Lett. 127 (2021) 151301 [2110.00483]. +[41] T.-H. Yeh, J. Shelton, K.A. Olive and B.D. Fields, Probing physics beyond the standard model: +limits from BBN and the CMB independently and combined, JCAP 10 (2022) 046 +[2207.13133]. +– 20 – + +[42] K. Abazajian et al., CMB-S4 Science Case, Reference Design, and Project Plan, 1907.04473. +[43] CMB-HD collaboration, Snowmass2021 CMB-HD White Paper, 2203.05728. +[44] COrE collaboration, COrE (Cosmic Origins Explorer) A White Paper, 1102.2181. +[45] EUCLID collaboration, Euclid Definition Study Report, 1110.3193. +[46] I. Ben-Dayan, B. Keating, D. Leon and I. Wolfson, Constraints on scalar and tensor spectra +from Neff, JCAP 06 (2019) 007 [1903.11843]. +[47] SPT-3G collaboration, SPT-3G: A Next-Generation Cosmic Microwave Background +Polarization Experiment on the South Pole Telescope, Proc. SPIE Int. Soc. Opt. Eng. 9153 +(2014) 91531P [1407.2973]. +[48] Simons Observatory collaboration, The Simons Observatory: Science goals and forecasts, +JCAP 02 (2019) 056 [1808.07445]. +[49] A. Ringwald, J. Schütte-Engel and C. 
Tamarit, Gravitational Waves as a Big Bang +Thermometer, JCAP 03 (2021) 054 [2011.04731]. +[50] A. Ringwald and C. Tamarit, Revealing the cosmic history with gravitational waves, Phys. Rev. +D 106 (2022) 063027 [2203.00621]. +[51] M. Maggiore, Gravitational wave experiments and early universe cosmology, Phys. Rept. 331 +(2000) 283 [gr-qc/9909001]. +[52] LISA collaboration, Laser Interferometer Space Antenna, arXiv e-prints (2017) +arXiv:1702.00786 [1702.00786]. +[53] M. Punturo et al., The Einstein Telescope: A third-generation gravitational wave observatory, +Class. Quant. Grav. 27 (2010) 194002. +[54] S. Hild et al., Sensitivity Studies for Third-Generation Gravitational Wave Observatories, +Class. Quant. Grav. 28 (2011) 094013 [1012.0908]. +[55] B. Sathyaprakash et al., Scientific Objectives of Einstein Telescope, Class. Quant. Grav. 29 +(2012) 124013 [1206.0331]. +[56] M. Maggiore et al., Science Case for the Einstein Telescope, JCAP 03 (2020) 050 [1912.02622]. +[57] J. Crowder and N.J. Cornish, Beyond LISA: Exploring future gravitational wave missions, +Phys. Rev. D 72 (2005) 083005 [gr-qc/0506015]. +[58] V. Corbin and N.J. Cornish, Detecting the cosmic gravitational wave background with the big +bang observer, Class. Quant. Grav. 23 (2006) 2435 [gr-qc/0512039]. +[59] G.M. Harry, P. Fritschel, D.A. Shaddock, W. Folkner and E.S. Phinney, Laser interferometry +for the big bang observer, Class. Quant. Grav. 23 (2006) 4887. +[60] F.-Y. Li, M.-X. Tang and D.-P. Shi, Electromagnetic response of a Gaussian beam to high +frequency relic gravitational waves in quintessential inflationary models, Phys. Rev. D 67 +(2003) 104008 [gr-qc/0306092]. +[61] E. Armengaud et al., Conceptual Design of the International Axion Observatory (IAXO), +JINST 9 (2014) T05002 [1401.3233]. +[62] IAXO collaboration, Physics potential of the International Axion Observatory (IAXO), JCAP +06 (2019) 047 [1904.09155]. +[63] D.J. Gross and R. Jackiw, Low-Energy Theorem for Graviton Scattering, Phys. Rev. 166 +(1968) 1287. +[64] T. Gleisberg, F. Krauss, K.T. Matchev, A. Schalicke, S. Schumann and G. Soff, Helicity +formalism for spin-2 particles, JHEP 09 (2003) 001 [hep-ph/0306182]. +– 21 – + +[65] P. de Aquino, K. Hagiwara, Q. Li and F. Maltoni, Simulating graviton production at hadron +colliders, JHEP 06 (2011) 132 [1101.5499]. +[66] H. van Dam and M.J.G. Veltman, Massive and massless Yang-Mills and gravitational fields, +Nucl. Phys. B 22 (1970) 397. +– 22 –
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Pasteura 5, 02-093 Warsaw, Poland bNew York University Abu Dhabi PO Box 129188, Saadiyat Island, Abu Dhabi, United Arab Emirates cPRISMA+ Cluster of Excellence and Mainz Institute for Theoretical Physics Johannes Gutenberg University, 55099 Mainz, Germany dInstituto de Física, Universidad de Antioquia Calle 70 # 52-21, Apartado Aéreo 1226, Medellín, Colombia E-mail: basabendu88barman@gmail.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='com, nicolas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='bernal@nyu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='edu, yonxu@uni-mainz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='de, oalberto.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='zapata@udea.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='co Abstract.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' We revisit graviton production via Bremsstrahlung from the decay of the inflaton during inflationary reheating.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Using two complementary computational techniques, we first show that such 3-body differential decay rates differ from previously reported results in the literature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' We then compute the stochastic gravitational wave (GW) background that forms during the period of reheating, when the inflaton perturbatively decays with the radiative emission of gravitons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' By computing the number of relativistic degrees of freedom in terms of ∆Neff, we constrain the resulting GW energy density from BBN and CMB.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Finally, we project current and future GW detector sensitivities in probing such a stochastic GW background, which typically peaks in the GHz to THz ballpark, opening up the opportunity to be detected with microwave cavities and space-based GW detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='11345v1 [hep-ph] 26 Jan 2023 Contents 1 Introduction 1 2 The Framework 2 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1 Decay into Scalars 3 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='2 Decay into Fermions 4 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='3 Decay into Vectors 5 3 Gravitational Wave Contribution to ∆Neff 5 4 Gravitational Wave Spectrum 7 5 Conclusions 10 A Feynman Rules for Relevant Vertices 11 B Calculation of the Decay Widths 11 B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1 Scalar Case 11 B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1 Polarization Tensor in Explicit Form 11 B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='2 Polarization Sum 14 B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='2 Fermionic Case 15 B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='3 Vector Case 17 1 Introduction The existence of a primordial gravitational wave (GW) background is one of the most cru- cial predictions of the inflationary scenario of the early universe.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Stochastic GWs can have several sources, viz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=', from the quantum fluctuations during inflation [1–4] that give rise to tensor perturbations, during preheating [5–9] when rapid particle production via parametric resonance occurs or from oscillations of cosmic string loops [10–13], originated from, for exam- ple, a spontaneously broken U(1) symmetry (gauged or global).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' However, as pointed out in Refs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' [14, 15], stochastic GWs of primordial origin can be sourced from the decay of the infla- ton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1 In that case, after the end of inflation, during the era of reheating, the inflaton field can decay into particles of arbitrary spins, depending on the microscopic nature of its interaction.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Considering gravitons to emerge as quantum fluctuations over the classical background, they inexorably couple to matter, leading to a graviton production from inflaton decays, similar to the Bremsstrahlung process as considered in Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' [17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' It is then unavoidable to have inflaton decay as a source of the primordial GW background.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' With this motivation, in this work, we revisit the scenario in which the inflaton can interact with bosons or fermions, leading to its perturbative decay during reheating, resulting in the production of a standard model (SM) radiation bath.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Here, we would like to emphasize that inflaton decay via trilinear couplings fully drains the inflaton energy, allowing the Uni- verse to transit into a radiation-domination phase [18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' By considering fluctuations over a flat background, we introduce the dynamical (massless) graviton field of spin 2 that communicates 1Such graviton can also act as a mediator in the production of the dark matter relic abundance [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' – 1 – with all other matter fields through the energy-momentum tensor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' This eventually leads to 3-body decay of the inflaton, involving a pair of scalars, fermions, or vector bosons, along with the radiative emission of a graviton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' In computing the 3-body decay widths, we follow two complementary approaches: a) we explicitly construct the graviton polarization tensors, and b) we utilize the polarization sum and show that our expressions converge in either case, however, differing from previous analyses reported in Refs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' [14, 15, 19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' It is then possible to compute the GW energy density from the differential 3-body decay width of the inflaton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' As is well known, in order for Big Bang Nucleosynthesis (BBN) to proceed successfully, the energy budget of the Universe must not comprise a significant amount of extra relativistic species, including GWs.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' This condition requires that the energy fraction of GWs to the SM radiation degrees of freedom (DoF) at that time is not greater than about ∼ 10%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Regardless of its origin, the energy density in GW established before BBN acts as radiation, and thus its impact on BBN is fully captured by ∆Neff, which counts the number of relativistic species.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Furthermore, GWs with initial adiabatic conditions leave the same imprint on the CMB as free-streaming dark radiation, and in this case, the limit on the present-day energy density in GWs is Ω(0) GW h2 < 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='3 × 10−6 [20, 21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' We discuss the impact of the CMB measurement of ∆Neff on the GW energy density emitted from the decay of the inflaton, taking into account the evolution of the energy densities during reheating.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' We compare the predicted spectrum of stochastic GWs with existing and future experiments, finding that the present GW spectrum strongly requires high-frequency GW detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Interestingly, we see that such high-frequency GWs could be detected, for example, with resonant cavity detectors [22, 23] or with space-based futuristic GW detectors [24, 25].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' The paper is organized as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' We present the underlying interaction Lagrangian and present the 2- and 3-body decay rates in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' In Section 3 we calculate the constraints from ∆Neff on the GW energy density.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' The computation of the primordial GW spectrum is presented in Section 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Finally, we conclude in Section 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' In the appendixes, we present our calculations in detail.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' 2 The Framework The underlying interaction Lagrangian for the present set-up can be divided into two parts.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' One part involving a trilinear interaction between the inflaton φ and a pair of complex scalar doublets ϕ with 4 DoF, a pair of vector-like Dirac fermions ψ with 4 DoF, or a pair of massive vector bosons Vµ with 3 DoF, given by L(2) int ⊃ −µ φ |ϕ|2 − yψ ψ ψ φ − gV Vµ V µ φ , (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1) where the corresponding interaction strengths are parameterized in terms of the couplings µ, yψ, and gV , respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' The superscript (2) denotes interactions that lead to a two-body decay of the inflaton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Also, note that the coupling strength µ and gV have mass dimension, while the Yukawa interaction strength yψ is dimensionless.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Here, we remain agnostic about the underlying UV-complete Lagrangian and, for simplicity, work with an effective theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' On the other hand, since we are interested in the unavoidable Bremsstrahlung pro- cess involving gravitons, we expand the metric gµν around Minkowski spacetime: gµν ≃ ηµν + 2 MP hµν, where MP is the reduced Planck mass.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' This inevitably leads to gravitational interactions that are described by the Lagrangian [26] √−g L(g) int ⊃ − 2 MP hµν T µν, (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='2) – 2 – where hµν refers to the graviton field that appears as a quantum fluctuation on the flat background, and Tµν represents the energy-momentum tensor involving all matter particles involved in the theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Further, we do not consider any non-minimal coupling between the new fields of the theory and gravity;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' hence, this is a minimal scenario.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' All relevant Feynman rules involving the graviton and particles of different spins (0, 1/2, and 1) are elaborated in Appendix A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' The interactions appearing in Eqs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1) and (2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='2) give rise to 2- and 3-body decays of the inflaton into pairs of ϕ, ψ, and V in the final state, along with the emission of a massless graviton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' After production, gravitons propagate and constitute the stochastic GW background, the spectrum of which we shall compute, considering different spins of the final-state products.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' With this setup, we now move on to the discussion of three different decay scenarios, where the inflaton φ perturbatively decays into either a pair of bosons or a pair of fermions, with graviton radiation, due to the graviton-matter coupling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' In the following sections, we discuss three cases individually.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1 Decay into Scalars We start with the inflaton decay into spin-0 states, where the final-state particles are con- sidered to be complex doublet scalars, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' the SM Higgs doublet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' The 2-body decay rate in this case, following the Lagrangian in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1), is given by Γ(0) 0 = 2 M 16 π � µ M �2 � 1 − 4 y2 , (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='3) where y ≡ m/M, with m being the mass of the daughter particles (independent of their spin).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' The factor of 2 appears because of two possible decay channels for the complex scalar doublet in the final state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' The subscript represents the spin of the final-state particles, while the superscript (0) denotes the 2-body decay width.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' As advocated before, due to the irreducible gravitational interaction (cf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='2)), the final state could also contain a graviton [14], leading to a 3-body decay of φ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' The general three-body decay diagrams are shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' 1, with l, ω, p, and q denoting the initial and final four-momentum, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='2 Here, we denote any general final state as F, where F can be a scalar, a fermion, or a gauge boson.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' The detailed computation of the 3-body decay following two different methodologies, namely the explicit construction of graviton polarization tensors and the polarization sum, is reported in Appendix B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' The differential decay rate for the scalar final state with the emission of a graviton of energy Eω reads dΓ(1) 0 dEω = 1 32 π3 � µ MP �2 �(1 − 2x) (1 − 2x + 2y2) 4x α−1 + y2 (y2 + 2x − 1) x ln �1 + α 1 − α �� , (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='4) with x ≡ Eω/M and α ≡ � 1 − 4 y2 1 − 2x , (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='5) with a graviton energy spanning the range 0 < Eω ≤ M �1 2 − 2 y2 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='6) 2The amplitude of the bottom right diagram is proportional to ηµνϵµν (cf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' (A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='3)) and therefore vanishes due to the traceless condition for a massless graviton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' – 3 – Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Feynman diagrams for an inflaton decay into a pair of particles F, along with a radiated graviton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Here F could be a scalar ϕ, a fermion ψ, or a vector V , while hµν is the graviton tensor field.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' We denote the incoming and outgoing momenta with dashed arrowheads.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Since the differential rate in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='4) plays a key role in our subsequent calculation, we would like to make some remarks before proceeding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Note that a graviton could carry at most half of the inflaton energy, which occurs in a limit where the daughter particle mass is zero, namely y → 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' In such a case, the differential decay rate vanishes as the phase space closes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' More generally, the differential decay rate should also vanish when x → 1 2 − 2 y2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' We notice that our result differs from that reported in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' (7) of Ref.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='2 Decay into Fermions Following the second term in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='1), we compute the 2-body decay of φ into a pair of fermions in the final state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' In that case, the decay width is given by Γ(0) 1/2 = y2 ψ 8π M � 1 − 4 y2�3/2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='7) As before, one can compute the differential rate of the three-body final state involving a pair of ψ’s and a graviton in the final state, leading to dΓ(1) 1/2 dEω = y2 ψ 64 π3 � M MP �2 � 1 − 2x x α−1 � 8x y2 + 2x (x − 1) − 8y4 − 2y2 + 1 � + 4 y2 � (5 − 8x) y2 − (x − 1)2 − 4y4� x ln �1 + α 1 − α � � , (2.' 
Interestingly, we again find that our expression for the 3-body decay rate differs from those reported in Eq. (8) of Ref. [14] and Eq. (B.1) of Ref. [19].

2.3 Decay into Vectors

For inflaton decays into massive vectors via the trilinear interaction term $\phi\, V_\mu V^\mu$, the 2-body decay rate is given by

$$\Gamma^{(0)}_{1} = \frac{M}{64\pi}\left(\frac{g_V}{M}\right)^2 \frac{1 - 4y^2 + 12y^4}{y^4}\,\sqrt{1-4y^2}\,, \qquad (2.9)$$

while the 3-body differential decay rate reads (see Appendix B.3 for the details of the computation)

$$\frac{d\Gamma^{(1)}_{1}}{dE_\omega} = \frac{1}{1920\pi^3\, x\, y^4}\left(\frac{g_V}{M_P}\right)^2\Big\{\alpha\big[360\,(1-2x)\,y^6 + 4\,(4x(23x-5)+15)\,y^4 + 2\,(2x-1)(28x(14x-5)+15)\,y^2 + (1-2x)^2\,(4x(2x-5)+15)\big] + 60\,y^2\big[12y^6 + 16(x-1)y^4 + (5+4x(4x-3))y^2 - (1-2x)^2(1+2x)\big]\ln\frac{1+\alpha}{1-\alpha}\Big\}. \qquad (2.10)$$

Note that the factor $1/y^4$ comes from the polarization sum for the massive vector, and therefore the massless case cannot be recovered in the limit y → 0. We would like to mention that our results in Eqs. (2.9) and (2.10) differ from the ones reported in Eqs. (4) and (7) of Ref. [15].
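To make the size of the effect concrete, the sketch below encodes Eqs. (2.9) and (2.10) and integrates the graviton-energy-weighted rate: the $1/y^4$ factors cancel in the ratio to the 2-body width, leaving the $(M/M_P)^2$ suppression. The values of $M$, $g_V$ and $y$ are illustrative only, and the reduced-Planck-mass value is an assumption of the sketch.

```python
import numpy as np
from scipy.integrate import quad

M_P = 2.435e18                       # reduced Planck mass in GeV (assumed value)

def Gamma0_vector(M, y, g_V):
    """2-body width of Eq. (2.9), phi -> V V."""
    return (M / (64.0*np.pi) * (g_V/M)**2
            * (1.0 - 4.0*y**2 + 12.0*y**4) / y**4 * np.sqrt(1.0 - 4.0*y**2))

def dGamma1_vector(E_w, M, y, g_V):
    """Differential 3-body rate of Eq. (2.10), phi -> V V + graviton."""
    x = E_w / M
    a = np.sqrt(max(1.0 - 4.0*y**2 / (1.0 - 2.0*x), 0.0))   # guard against rounding
    poly_a = (360.0*(1-2*x)*y**6 + 4.0*(4*x*(23*x-5) + 15)*y**4
              + 2.0*(2*x-1)*(28*x*(14*x-5) + 15)*y**2
              + (1-2*x)**2 * (4*x*(2*x-5) + 15))
    poly_l = (12.0*y**6 + 16.0*(x-1)*y**4 + (5.0 + 4*x*(4*x-3))*y**2
              - (1-2*x)**2 * (1+2*x))
    return (g_V/M_P)**2 / (1920.0*np.pi**3 * x * y**4) * (
        a*poly_a + 60.0*y**2*poly_l*np.log((1.0+a)/(1.0-a)))

if __name__ == "__main__":
    M, g_V = 1.0e13, 1.0e10                          # illustrative placeholder values
    for y in (0.2, 0.1, 0.05):
        E_max = M * (0.5 - 2.0*y**2)
        # Fraction of the inflaton energy radiated into gravitons per decay:
        num, _ = quad(lambda E: (E/M) * dGamma1_vector(E, M, y, g_V), 1e-8*M, E_max)
        print(y, num / Gamma0_vector(M, y, g_V))
    # The 1/y^4 factors cancel in the ratio, which stays of order (M/M_P)^2.
```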
3 Gravitational Wave Contribution to $\Delta N_{\rm eff}$

To recover the standard hot Big Bang cosmology after inflation, the inflaton energy must be transferred into SM radiation degrees of freedom, which eventually thermalize and dominate the energy budget of the Universe. This transition, known as reheating, is typically marked by the equality between the inflaton and radiation energy densities. Reheating must be completed before the onset of BBN, which occurs at $T_{\rm BBN} \simeq 4$ MeV [27–31]. For BBN to proceed successfully, the energy budget of the Universe must not contain a significant amount of extra relativistic species, including GWs. Regardless of its origin, any GW energy density present before BBN behaves as radiation, and thus its impact on BBN is fully captured in terms of $\Delta N_{\rm eff}$. Therefore, an excess of GW energy density around BBN can be constrained using the (present and future) bounds on $\Delta N_{\rm eff}$ from CMB, BBN, and their combination. In this section, we present our calculation assuming that the inflaton φ oscillates in a simple quadratic potential, which implies that its energy density scales as non-relativistic matter during reheating.

The effective number of neutrinos $N_{\rm eff}$ is defined from the radiation energy density in the late universe (at a photon temperature $T_{\Delta N_{\rm eff}}$) as

$$\rho_{\rm rad}(T_{\Delta N_{\rm eff}}) = \rho_\gamma + \rho_\nu + \rho_{\rm GW} = \left[1 + \frac{7}{8}\left(\frac{T_\nu}{T_\gamma}\right)^4 N_{\rm eff}\right]\rho_\gamma(T_{\Delta N_{\rm eff}})\,, \qquad (3.1)$$

where $\rho_\gamma$, $\rho_\nu$, and $\rho_{\rm GW}$ correspond to the photon, SM neutrino, and GW energy densities, respectively, with $T_\nu/T_\gamma = (4/11)^{1/3}$. Within the SM, the prediction taking into account non-instantaneous neutrino decoupling is $N_{\rm eff}^{\rm SM} = 3.046$ [32–36],
whereas the presence of GWs implies

$$\Delta N_{\rm eff} \equiv N_{\rm eff} - N_{\rm eff}^{\rm SM} = \frac{8}{7}\left(\frac{11}{4}\right)^{4/3}\frac{\rho_{\rm GW}(T_{\Delta N_{\rm eff}})}{\rho_\gamma(T_{\Delta N_{\rm eff}})} = \frac{8}{7}\left(\frac{11}{4}\,\frac{g_{\star s}(T_{\Delta N_{\rm eff}})}{g_{\star s}(T_{\rm rh})}\right)^{4/3}\frac{g_\star(T_{\rm rh})}{2}\,\frac{\rho_{\rm GW}(T_{\rm rh})}{\rho_R(T_{\rm rh})}\,, \qquad (3.2)$$

where

$$\rho_R(T) = \frac{\pi^2}{30}\,g_\star(T)\,T^4, \qquad (3.3)$$
$$s(T) = \frac{2\pi^2}{45}\,g_{\star s}(T)\,T^3 \qquad (3.4)$$

are the SM radiation energy density and the SM entropy density, with $g_\star(T)$ and $g_{\star s}(T)$ the numbers of relativistic degrees of freedom [37].

The evolution of the inflaton, SM radiation, and GW energy densities can be tracked using the Boltzmann equations³

$$\frac{d\rho_\phi}{dt} + 3H\rho_\phi = -\left(\Gamma^{(0)} + \Gamma^{(1)}\right)\rho_\phi\,, \qquad (3.5)$$
$$\frac{d\rho_R}{dt} + 4H\rho_R = +\Gamma^{(0)}\rho_\phi + \int \frac{d\Gamma^{(1)}}{dE_\omega}\,\frac{M-E_\omega}{M}\,\rho_\phi\, dE_\omega\,, \qquad (3.6)$$
$$\frac{d\rho_{\rm GW}}{dt} + 4H\rho_{\rm GW} = +\int \frac{d\Gamma^{(1)}}{dE_\omega}\,\frac{E_\omega}{M}\,\rho_\phi\, dE_\omega\,, \qquad (3.7)$$

where $H$ is the Hubble expansion rate, given by

$$H^2 = \frac{\rho_\phi + \rho_R + \rho_{\rm GW}}{3M_P^2}\,, \qquad (3.8)$$

while $\Gamma^{(0)}$ and $\Gamma^{(1)}$ are the 2- and 3-body decay widths. The factors $(M-E_\omega)/M$ and $E_\omega/M$ correspond to the fractions of the inflaton energy injected into SM radiation and GWs, respectively. It follows that

$$\frac{d(\rho_{\rm GW}/\rho_R)}{da} \simeq \frac{1}{a\,H}\,\frac{\rho_\phi}{\rho_R}\left[\int \frac{d\Gamma^{(1)}}{dE_\omega}\,\frac{E_\omega}{M}\,dE_\omega - \frac{\rho_{\rm GW}}{\rho_R}\,\Gamma^{(0)}\right]. \qquad (3.9)$$

This expression can be integrated during reheating, that is, for $a_{\rm max} \leq a \leq a_{\rm rh}$, corresponding to photon temperatures $T_{\rm max} \geq T \geq T_{\rm rh}$. Importantly, during reheating, while the SM thermal bath is being produced and the universe transitions to radiation domination, the bath temperature may rise to a value $T_{\rm max}$ that exceeds $T_{\rm rh}$ before cooling down [38]; this feature is missed if one adopts the instantaneous-decay approximation for reheating.
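The coupled system (3.5)-(3.8) lends itself to a simple numerical treatment. The sketch below integrates a toy version of it in $N = \ln a$, collapsing the two 3-body integrals into a single input, gw_frac, the mean fraction of the released energy carried by gravitons per decay (to be computed from the differential rates of Section 2); the values of $\Gamma^{(0)}$, gw_frac and the initial Hubble rate are illustrative assumptions, not quantities fixed by the text.

```python
import numpy as np
from scipy.integrate import solve_ivp

def reheating_densities(Gamma0, gw_frac, H_ini_over_Gamma0=100.0, N_end=6.0):
    """
    Toy integration of Eqs. (3.5)-(3.8) in N = ln a, with all densities normalised
    to the initial inflaton energy density.  The two 3-body integrals are collapsed
    into a single number gw_frac (the mean fraction of the released energy carried
    by gravitons per decay), and the small 3-body correction to Eq. (3.5) is dropped.
    """
    H_ini = H_ini_over_Gamma0 * Gamma0

    def rhs(N, u):
        u_phi, u_R, u_GW = u
        H = H_ini * np.sqrt(max(u_phi + u_R + u_GW, 1e-300))     # Eq. (3.8), normalised
        return [-3.0*u_phi - Gamma0*u_phi/H,                      # Eq. (3.5)
                -4.0*u_R   + (1.0 - gw_frac)*Gamma0*u_phi/H,      # Eq. (3.6)
                -4.0*u_GW  + gw_frac*Gamma0*u_phi/H]              # Eq. (3.7)

    return solve_ivp(rhs, (0.0, N_end), [1.0, 0.0, 0.0],
                     method="Radau", rtol=1e-8, atol=1e-20)

if __name__ == "__main__":
    Gamma0, gw_frac = 1.0, 1.0e-10        # illustrative inputs (gw_frac ~ (M/M_P)^2)
    sol = reheating_densities(Gamma0, gw_frac)
    u_phi, u_R, u_GW = sol.y[:, -1]
    print("rho_GW/rho_R after reheating:", u_GW/u_R, "  input gw_frac:", gw_frac)
    # Both fluids are sourced in proportion, so the late-time ratio tracks gw_frac.
```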
We note that during reheating

$$\rho_\phi(a) = \rho_\phi(a_{\rm rh})\left(\frac{a_{\rm rh}}{a}\right)^3, \qquad (3.10)$$
$$T(a) = T_{\rm rh}\left(\frac{a_{\rm rh}}{a}\right)^{3/8}, \qquad (3.11)$$

as the inflaton is assumed to be non-relativistic and to decay with a constant width into SM radiation. We emphasize that this scaling of the SM temperature arises because the SM radiation is not free but is continuously sourced by inflaton decays. The end of reheating corresponds to the moment at which the equality $\rho_R(T_{\rm rh}) = \rho_\phi(T_{\rm rh})$ is realized. Additionally, assuming that at the beginning of reheating the universe contained no SM radiation or GWs, and taking into account that at the end of reheating $\Gamma^{(0)} \simeq H(T_{\rm rh})$, Eq. (3.9) admits the analytical solution

$$\frac{\rho_{\rm GW}(T_{\rm rh})}{\rho_R(T_{\rm rh})} \simeq \int_0^{M/2}\frac{1}{\Gamma^{(0)}}\,\frac{d\Gamma^{(1)}}{dE_\omega}\,\frac{E_\omega}{M}\,dE_\omega\left[1 - \left(\frac{T_{\rm rh}}{T_{\rm max}}\right)^{8/3}\right]. \qquad (3.12)$$

³ We emphasize that our computation of the GW energy density follows the evolution of the energy densities beyond the instantaneous approximation, as was done in Refs. [14, 15].

We notice that within the approximation of an instantaneous decay of the inflaton, the expression in the square brackets reduces to one. For the different decay channels, in the limit y → 0, one has

$$\frac{\rho_{\rm GW}(T_{\rm rh})}{\rho_R(T_{\rm rh})} \simeq C_\rho\,\frac{M^2}{\pi^2 M_P^2}\left[1 - \left(\frac{T_{\rm rh}}{T_{\rm max}}\right)^{8/3}\right], \qquad (3.13)$$

where $C_\rho = 1/96$ for scalars, $3/128$ for fermions, and $127/1800$ for vectors. Therefore, the corresponding GW contribution to $\Delta N_{\rm eff}$ is

$$\Delta N_{\rm eff} \simeq C_{\Delta N_{\rm eff}}\left(\frac{M}{M_P}\right)^2\left[1 - \left(\frac{T_{\rm rh}}{T_{\rm max}}\right)^{8/3}\right], \qquad (3.14)$$

with $C_{\Delta N_{\rm eff}} \simeq 0.01$ for scalars, $0.03$ for fermions, and $0.08$ for vectors, where we have taken $g_{\star s}(T_{\Delta N_{\rm eff}}) \simeq 10.75$.
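These coefficients can be reproduced by a direct numerical integration. The sketch below checks $C_\rho = 3/128$ for the fermionic channel, combining Eqs. (2.7), (2.8) and (3.12), and then converts it into $C_{\Delta N_{\rm eff}}$ via Eq. (3.2). The reduced Planck mass and the value $g_\star(T_{\rm rh}) = g_{\star s}(T_{\rm rh}) = 106.75$ (full SM content) are assumptions of the sketch; $g_{\star s}(T_{\Delta N_{\rm eff}}) = 10.75$ is taken from the text.

```python
import numpy as np
from scipy.integrate import quad

M_P = 2.435e18                                  # reduced Planck mass [GeV] (assumed)

def gw_energy_fraction_fermion(M, y, y_psi=1.0):
    """int dE (E/M) dGamma^(1)/dE divided by Gamma^(0), cf. Eqs. (2.7), (2.8), (3.12)."""
    Gamma0 = y_psi**2 / (8.0*np.pi) * M * (1.0 - 4.0*y**2)**1.5
    def integrand(E_w):
        x = E_w / M
        a = np.sqrt(max(1.0 - 4.0*y**2/(1.0 - 2.0*x), 0.0))
        return (E_w/M) * y_psi**2/(64.0*np.pi**3) * (M/M_P)**2 * (
            (1.0-2.0*x)*a/x * (8*x*y**2 + 2*x*(x-1) - 8*y**4 - 2*y**2 + 1)
            + 4*y**2*((5-8*x)*y**2 - (x-1)**2 - 4*y**4)/x * np.log((1+a)/(1-a)))
    val, _ = quad(integrand, 1e-10*M, M*(0.5 - 2.0*y**2))
    return val / Gamma0

if __name__ == "__main__":
    M, y = 1.0e16, 1.0e-4                        # near-massless daughters, y -> 0
    C_rho = gw_energy_fraction_fermion(M, y) * np.pi**2 * M_P**2 / M**2
    print("C_rho =", C_rho, " (expected 3/128 =", 3/128, ")")
    # Translate into Delta N_eff via Eq. (3.2), for Tmax >> Trh:
    g_cmb, g_rh = 10.75, 106.75                  # g_*s(T_DeltaNeff) from the text; SM value assumed at T_rh
    C_dNeff = 8/7 * (11/4 * g_cmb/g_rh)**(4/3) * g_rh/2 * C_rho/np.pi**2
    print("C_DeltaNeff =", C_dNeff)              # ~0.03 for the fermionic channel
```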
Again, note that in the instantaneous-reheating approximation the square bracket in Eq. (3.14) becomes unity. To avoid jeopardizing the successful predictions of BBN, the reheating temperature must satisfy $T_{\rm rh} \geq T_{\rm BBN}$. Furthermore, recent BICEP/Keck measurements have provided a bound on the tensor-to-scalar ratio, $r < 0.035$ [40], stronger than the previous Planck result [39], implying $T_{\rm rh} \lesssim 5.5 \times 10^{15}$ GeV.

Within the framework of ΛCDM, the Planck legacy data yield $N_{\rm eff} = 2.99 \pm 0.34$ at 95% CL [39]. Once baryon acoustic oscillation (BAO) data are included, the measurement becomes more stringent: $N_{\rm eff} = 2.99 \pm 0.17$ at 1σ CL. Upcoming CMB experiments, such as SPT-3G [47] and the Simons Observatory [48], will soon improve on Planck's precision on $N_{\rm eff}$.
In particular, CMB-S4 [42] and CMB-HD [43] will be sensitive to $\Delta N_{\rm eff} \sim 0.06$ and $\Delta N_{\rm eff} \sim 0.027$ at 95% CL, respectively. As calculated in Ref. [41], a combined analysis of BBN and CMB data results in $N_{\rm eff} = 2.880 \pm 0.144$. The next generation of satellite missions, such as COrE [44] and Euclid [45], should impose 2σ limits of $\Delta N_{\rm eff} \lesssim 0.013$. Furthermore, as mentioned in Ref. [46], a hypothetical cosmic-variance-limited (CVL) CMB polarization experiment could in principle push the sensitivity down to $\Delta N_{\rm eff} \lesssim 3 \times 10^{-6}$, although this does not seem to be an experimentally realistic scenario.

Figure 2 illustrates the constraints from $\Delta N_{\rm eff}$ following Eq. (3.14), taking $T_{\rm max} \gg T_{\rm rh}$. As discussed above, we show the present and future limits of $\Delta N_{\rm eff}$ on the GW energy density for scenarios in which the inflaton decays into a pair of scalars (red dotted line), a pair of Dirac fermions (blue dashed line), or a pair of massive vector bosons (black solid line). As can be seen, probing the impact of GW production on $\Delta N_{\rm eff}$ through any of these channels is very challenging not only for present experiments but even for the projected sensitivities, unless $M \sim M_P$.
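This last statement can be quantified by inverting Eq. (3.14), with $T_{\rm max} \gg T_{\rm rh}$, for the inflaton mass that saturates each projected sensitivity. The short sketch below does so with the $C_{\Delta N_{\rm eff}}$ coefficients and the sensitivities quoted above; the reduced-Planck-mass value is an assumption of the sketch.

```python
import numpy as np

M_P = 2.435e18                                   # reduced Planck mass [GeV] (assumed)

# C_DeltaNeff for the three channels, Eq. (3.14)
C = {"scalar": 0.01, "fermion": 0.03, "vector": 0.08}

# Projected sensitivities on Delta N_eff quoted above
sens = {"CMB-S4": 0.06, "CMB-HD": 0.027, "COrE/Euclid": 0.013, "CVL": 3.0e-6}

# Invert Eq. (3.14) (with Tmax >> Trh) for the inflaton mass saturating each bound;
# values above M_P signal that the corresponding channel is out of reach.
for channel, C_N in C.items():
    for exp, dN in sens.items():
        M_req = M_P * np.sqrt(dN / C_N)
        print(f"{channel:8s} {exp:12s} M ~ {M_req:.1e} GeV ({M_req/M_P:.2f} M_P)")
```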
A large inflaton mass is required to overcome the strong Planck suppression originating from the minimal graviton coupling. Note, however, that experiments such as COrE or Euclid could have a chance of probing the vector scenario.

Figure 2. Contribution of the GW energy density to $\Delta N_{\rm eff}$ (cf. Eq. (3.14) with $T_{\rm max} \gg T_{\rm rh}$), where the slanted straight lines correspond to the scalar, fermion, and vector-boson final states (red dotted, blue dashed, and black solid, respectively). We show the present limits from Planck [39] and the combined CMB+BBN analysis [41], together with the future limits from CMB-S4 [42], CMB-HD [43], COrE [44]/Euclid [45], and the hypothetical CVL experiment [46], from top to bottom.

4 Gravitational Wave Spectrum

After being produced in inflaton 3-body decays, gravitons propagate and spread throughout the universe, forming a homogeneous and isotropic stochastic GW background at present, after the attenuation of their energy and amplitude due to cosmic expansion. The present-day primordial GW spectrum $\Omega_{\rm GW}(f)$ at a frequency $f$ is defined by

$$\Omega_{\rm GW}(f) = \frac{1}{\rho_c}\frac{d\rho_{\rm GW}}{d\ln f} = \Omega^{(0)}_\gamma\,\frac{d(\rho_{\rm GW}/\rho_R)}{d\ln f} = \Omega^{(0)}_\gamma\,\frac{g_\star(T_{\rm rh})}{g_\star(T_0)}\left(\frac{g_{\star s}(T_0)}{g_{\star s}(T_{\rm rh})}\right)^{4/3}\frac{d\left(\rho_{\rm GW}(T_{\rm rh})/\rho_R(T_{\rm rh})\right)}{d\ln E_\omega}\,, \qquad (4.1)$$

where $\rho_c$ is the critical energy density and $\Omega^{(0)}_\gamma h^2 \simeq 2.47 \times 10^{-5}$ is the observed photon abundance [39]. Equation (4.1) must be evaluated at an energy

$$E_\omega = 2\pi f\,\frac{a_0}{a_{\rm rh}} = 2\pi f\,\frac{T_{\rm rh}}{T_0}\left(\frac{g_{\star s}(T_{\rm rh})}{g_{\star s}(T_0)}\right)^{1/3}, \qquad (4.2)$$

which accounts for the redshift of the GW energy between reheating and the present epoch.
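In practice, Eq. (4.2) is just a linear map between emission energy and present-day frequency. A minimal sketch, assuming standard values for the present photon temperature and for the entropic degrees of freedom (and with illustrative values of $M$ and $T_{\rm rh}$), reads as follows.

```python
import numpy as np

# Assumed standard cosmological inputs for this sketch
T0        = 2.35e-13          # present photon temperature [GeV] (~2.7 K)
gss_T0    = 3.94              # g_*s today
gss_Trh   = 106.75            # g_*s at reheating (full SM content assumed)
GeV_to_Hz = 1.0 / 6.582e-25   # 1/hbar [Hz per GeV]

def frequency_today(E_omega, T_rh):
    """Present-day frequency of a graviton emitted with energy E_omega at T = T_rh, Eq. (4.2)."""
    return E_omega / (2.0*np.pi) * (T0/T_rh) * (gss_T0/gss_Trh)**(1.0/3.0) * GeV_to_Hz

if __name__ == "__main__":
    M, T_rh = 2.4e17, 5.5e15                     # illustrative values [GeV]
    print("f(E = M/2) =", frequency_today(M/2.0, T_rh), "Hz")
```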
Similarly to Eq. (3.12), the differential ratio of the GW to SM radiation energy densities at the end of reheating is

$$\frac{d\left(\rho_{\rm GW}(T_{\rm rh})/\rho_R(T_{\rm rh})\right)}{dE_\omega} \simeq \frac{d\Gamma^{(1)}}{dE_\omega}\,\frac{E_\omega}{M}\,\frac{1}{\Gamma^{(0)}}\left[1 - \left(\frac{T_{\rm rh}}{T_{\rm max}}\right)^{8/3}\right], \qquad (4.3)$$

where again, within the approximation of an instantaneous decay of the inflaton, the expression in the square brackets reduces to one.

Figure 3. Dimensionless strain $h_c$ as a function of the GW frequency $f$, for the two benchmarks (① and ②) described in the text, assuming $T_{\rm max} \gg T_{\rm rh}$ and y → 0. The black solid, blue dashed, and red dotted curves correspond to decays into vector, fermion, and scalar final states, respectively. Projected sensitivities of different GW detection experiments are also shown in orange (adapted from Refs. [49, 50]). The gray dashed diagonal lines are the CMB bounds on $\Delta N_{\rm eff}$ from Planck, COrE/Euclid, and a hypothetical CVL experiment, respectively.

For the inflaton decay into particles of different spins, one has

$$\Omega_{\rm GW}(f) \simeq C_{\Omega_{\rm GW}}\,\frac{T_{\rm rh}}{5.5\times 10^{15}\,{\rm GeV}}\,\frac{M}{M_P}\,\frac{f}{10^{12}\,{\rm Hz}}\,, \qquad (4.4)$$

with $C_{\Omega_{\rm GW}} \simeq 1.4\times 10^{-8}$ for scalars, $C_{\Omega_{\rm GW}} \simeq 2.8\times 10^{-8}$ for fermions, and $C_{\Omega_{\rm GW}} \simeq 11.2\times 10^{-8}$ for vectors.
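For reference, Eq. (4.4) is trivial to tabulate; the snippet below evaluates the low-frequency (linearly rising) part of the spectrum for the three channels, with illustrative values of $M$ and $T_{\rm rh}$ and an assumed value for the reduced Planck mass (its range of validity is quantified in the text that follows).

```python
M_P = 2.435e18                                     # reduced Planck mass [GeV] (assumed)
C_Omega = {"scalar": 1.4e-8, "fermion": 2.8e-8, "vector": 11.2e-8}

def Omega_GW(f_Hz, M, T_rh, channel):
    """Low-frequency GW spectrum of Eq. (4.4)."""
    return C_Omega[channel] * (T_rh/5.5e15) * (M/M_P) * (f_Hz/1.0e12)

if __name__ == "__main__":
    M, T_rh = 2.4e17, 5.5e15                       # illustrative values [GeV]
    for ch in C_Omega:
        print(ch, Omega_GW(1.0e11, M, T_rh, ch))
```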
Equation (4.4) is valid for frequencies smaller than

$$f \lesssim \frac{M}{4\pi}\,\frac{T_0}{T_{\rm rh}}\left(\frac{g_{\star s}(T_0)}{g_{\star s}(T_{\rm rh})}\right)^{1/3} \simeq 4.1\times 10^{12}\left(\frac{M}{M_P}\right)\left(\frac{5.5\times 10^{15}\,{\rm GeV}}{T_{\rm rh}}\right){\rm Hz}\,, \qquad (4.5)$$

where we have used $g_{\star s}(T_0) = 3.94$ and $g_{\star s}(T_{\rm rh}) = 106.75$. Finally, we compute the dimensionless strain, defined as [51]

$$h_c(f) = \frac{1}{f}\sqrt{\frac{3H_0^2\,\Omega_{\rm GW}(f)}{2\pi^2}} = 1.26\times 10^{-18}\left(\frac{\rm Hz}{f}\right)\sqrt{h^2\,\Omega_{\rm GW}(f)}\,, \qquad (4.6)$$

where $H_0 \equiv H(T_0) \simeq 1.44\times 10^{-42}$ GeV is the present-day Hubble parameter and $h = 0.674$ [39].

In Fig. 3 we show the dimensionless strain $h_c$ as a function of the GW frequency $f$ for two benchmark points: ① $M = M_P/10$ and $T_{\rm rh} = 5.5\times 10^{15}$ GeV, and ② $M = M_P/10^3$ and $T_{\rm rh} = M_P/(2\times 10^4)$. In the same plane, we also show the projected limits from several proposed GW detectors, for example LISA [52], the Einstein Telescope (ET) [53–56], the Big Bang Observer (BBO) [57–59], ultimate DECIGO (uDECIGO) [24, 25], GW-to-electromagnetic-wave conversion in vacuum (solid) and in a Gaussian beam (GB) (dotted) [49, 60], resonant cavities [22, 23], and the International Axion Observatory (IAXO) [61, 62].
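To get a feeling for the numbers entering Fig. 3, the following sketch combines Eqs. (4.4)-(4.6) for the two benchmarks and evaluates the strain near the cutoff frequency of Eq. (4.5). This is only a rough estimate, since extrapolating the linear form (4.4) all the way to the cutoff ignores the turnover of the true spectrum; the reduced-Planck-mass value is an assumption of the sketch.

```python
import numpy as np

M_P = 2.435e18                                     # reduced Planck mass [GeV] (assumed)
C_Omega = {"scalar": 1.4e-8, "fermion": 2.8e-8, "vector": 11.2e-8}
h = 0.674

def f_cut(M, T_rh):
    """Approximate spectral cutoff of Eq. (4.5) [Hz]."""
    return 4.1e12 * (M/M_P) * (5.5e15/T_rh)

def h_c(f_Hz, M, T_rh, channel):
    """Dimensionless strain of Eq. (4.6), using the low-frequency spectrum (4.4)."""
    Omega = C_Omega[channel] * (T_rh/5.5e15) * (M/M_P) * (f_Hz/1.0e12)
    return 1.26e-18 / f_Hz * np.sqrt(h**2 * Omega)

if __name__ == "__main__":
    benchmarks = {"BP1": (M_P/10.0, 5.5e15), "BP2": (M_P/1.0e3, M_P/2.0e4)}
    for name, (M, T_rh) in benchmarks.items():
        f = f_cut(M, T_rh)
        print(name, f"f_cut ~ {f:.1e} Hz,  h_c near the cutoff ~ {h_c(f, M, T_rh, 'vector'):.1e}")
```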
We have projected the $\Delta N_{\rm eff}$ bounds from Planck, COrE/Euclid, and the CVL experiment from Fig. 2 onto the GW strain using [51]

$$\int d(\ln f)\,\Omega_{\rm GW}(f)\,h^2 \leq 5.6\times 10^{-6}\,\Delta N_{\rm eff}\,. \qquad (4.7)$$

These bounds are shown as the straight gray diagonal lines. As one can infer from Eq. (4.5), a larger $M/T_{\rm rh}$ ratio corresponds to a higher frequency, which is reflected in the curves of Fig. 3. As can be seen, only microwave-cavity detectors are capable of probing the high-frequency regime of the GW spectrum, while detectors such as uDECIGO might be able to reach the lower-frequency part of the spectrum.

5 Conclusions

Inflaton 3-body decays are a source of a stochastic gravitational wave (GW) background, due to inexorable graviton bremsstrahlung. In this work, we have revisited such three-body decay rates, considering a perturbative coupling of the inflaton to a pair of massive spin-0 bosons, spin-1/2 fermions, or spin-1 vector bosons, along with the radiative emission of a massless graviton from either the initial or the final states. We have found that the previously reported results show discrepancies with our findings in all three cases. To make our claim more robust, we employed two distinct procedures for handling the graviton polarization and found that they agree with each other.
With this improvement over existing results, we numerically calculated the contribution of the GW energy density to the number of relativistic degrees of freedom around the time of BBN and CMB decoupling, typically encoded in $\Delta N_{\rm eff}$. We have tracked the evolution of the inflaton, radiation, and GW energy densities by solving a set of coupled Boltzmann equations. Due to the Planck-scale suppression of the minimal gravitational coupling, the GW energy density from inflaton bremsstrahlung stays well below the CMB bounds on $\Delta N_{\rm eff}$, regardless of the spin of the final-state particles. As the GW spectrum peaks in the GHz-to-THz ballpark, this primordial GW signature remains beyond the reach of most detector facilities; however, it may leave a footprint in resonant-cavity detectors or even in upcoming space-based GW detectors.

Acknowledgments

The authors thank Manuel Drees, Yann Mambrini, and Simon Cléry for useful discussions, Rome Samanta for providing the experimental limits, and Yong Tang and Da Huang for helpful communication. NB received funding from the Spanish FEDER/MCIU-AEI under the grant FPA2017-84543-P. OZ has received funding from the Ministerio de Ciencia, Tecnología e Innovación (MinCiencias - Colombia) through Grants 82315-2021-1080 and 80740-492-2021, and has been partially supported by Sostenibilidad-UdeA and the UdeA/CODI Grant 2020-33177.

A Feynman Rules for Relevant Vertices

Here we focus on a massless spin-2 graviton field, whose polarization tensor $\epsilon_{\mu\nu}$ has to satisfy the following conditions [63, 64]:

$$\epsilon^i_{\mu\nu} = \epsilon^i_{\nu\mu} \quad\text{(symmetric)}, \qquad (A.1)$$
$$\omega^\mu\,\epsilon^i_{\mu\nu} = 0 \quad\text{(transverse)}, \qquad (A.2)$$
$$\eta^{\mu\nu}\,\epsilon^i_{\mu\nu} = 0 \quad\text{(traceless)}, \qquad (A.3)$$
$$\epsilon^i_{\mu\nu}\,\epsilon^{j\,\star\,\mu\nu} = \delta^{ij} \quad\text{(orthonormal)}, \qquad (A.4)$$

where $i, j = 1, 2$ are the polarization indices and ω is the graviton four-momentum.
The polarization sum for the massless graviton is [65]

$$\sum_{\rm pol}\epsilon^{\star\,\mu\nu}\,\epsilon^{\alpha\beta} = \frac12\left(\hat\eta^{\mu\alpha}\hat\eta^{\nu\beta} + \hat\eta^{\mu\beta}\hat\eta^{\nu\alpha} - \hat\eta^{\mu\nu}\hat\eta^{\alpha\beta}\right), \qquad (A.5)$$

with

$$\hat\eta^{\mu\nu} \equiv \eta^{\mu\nu} - \frac{\omega^\mu\bar\omega^\nu + \bar\omega^\mu\omega^\nu}{\omega\cdot\bar\omega}\,, \qquad (A.6)$$

where $\omega = (E_\omega, \vec\omega)$ and $\bar\omega = (E_\omega, -\vec\omega)$. For a massless graviton one has $\omega\cdot\bar\omega = E_\omega^2 + \vec\omega^2 = 2E_\omega^2$. The polarization sum in Eq. (A.5) indeed preserves the symmetric, transverse, traceless, and orthonormal conditions.⁴ Note that, due to the van Dam-Veltman discontinuity [65, 66], one cannot obtain the massless graviton propagator from the massive one simply by taking the limit of the graviton mass $m_{h_{\mu\nu}} \to 0$.

From the interaction Lagrangian in Eq. (2.2), the relevant Feynman rules can be extracted; we tabulate them in Fig. 4.

B Calculation of the Decay Widths

In this appendix we present the details of the computation of the differential rates for the inflaton decay into three-body final states including a graviton.

B.1 Scalar Case

To calculate the differential decay rate and cross-check the results, we follow two different strategies, based on: i) an explicit construction of the graviton polarization tensors, and ii) the polarization sum for the massless graviton.
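The stated properties of the sum (A.5) are easy to verify numerically. The sketch below builds $\hat\eta$ for a graviton moving along the x-axis (an arbitrary choice for the check) and confirms transversality and tracelessness of the resulting rank-4 tensor; it is a consistency check only, not part of the derivation.

```python
import numpy as np

eta = np.diag([1.0, -1.0, -1.0, -1.0])

def eta_hat(omega):
    """eta-hat of Eq. (A.6) for a massless graviton four-momentum omega (upper indices)."""
    omega_bar = np.array([omega[0], -omega[1], -omega[2], -omega[3]])
    dot = omega @ eta @ omega_bar                 # omega . omega-bar = 2 E^2
    return eta - (np.outer(omega, omega_bar) + np.outer(omega_bar, omega)) / dot

def pol_sum(omega):
    """Polarization sum of Eq. (A.5) as a rank-4 array P[mu, nu, alpha, beta]."""
    g = eta_hat(omega)
    return 0.5 * (np.einsum('ma,nb->mnab', g, g) + np.einsum('mb,na->mnab', g, g)
                  - np.einsum('mn,ab->mnab', g, g))

if __name__ == "__main__":
    E = 3.0
    omega = np.array([E, E, 0.0, 0.0])            # graviton moving along x
    P = pol_sum(omega)
    omega_low = eta @ omega                       # lower the index with eta
    print("transverse:", np.max(np.abs(np.einsum('m,mnab->nab', omega_low, P))))  # ~0, Eq. (A.2)
    print("traceless :", np.max(np.abs(np.einsum('mn,mnab->ab', eta, P))))        # ~0, Eq. (A.3)
```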
B.1.1 Polarization Tensor in Explicit Form

Without loss of generality, we choose a coordinate system in which the graviton moves along the x direction, so that its four-momentum is $\omega = (E_\omega, \omega_x, 0, 0)$ with $\omega^2 = 0$, i.e. $\omega_x = E_\omega$. The four-momenta of the inflaton and of its other two decay products are $l = (M, 0, 0, 0)$, $p = (E_p, p_x, p_y, p_z)$, and $q = (M - E_p - E_\omega, -p_x - \omega_x, -p_y, -p_z)$, respectively.

⁴ We note that the naive polarization sum $\sum_{\rm pol}\epsilon^{\star\,\mu\nu}\epsilon^{\alpha\beta} = \frac12\left(\eta^{\mu\alpha}\eta^{\nu\beta} + \eta^{\mu\beta}\eta^{\nu\alpha} - \eta^{\mu\nu}\eta^{\alpha\beta}\right)$ violates the transverse, traceless, and orthonormal conditions and therefore should not be used.

Figure 4. Relevant graviton-matter vertices for a scalar (ϕ), a fermion (ψ), and a vector boson (V), from top to bottom, following Ref. [26].

The two polarization tensors that satisfy the traceless, transverse, symmetric, and orthonormal conditions of Eqs. (A.1) to (A.4) can be written explicitly as [66]

$$\epsilon^1_{\mu\nu} = \frac{1}{\sqrt2}\begin{pmatrix} 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 0\\ 0 & 0 & 1 & 0\\ 0 & 0 & 0 & -1\end{pmatrix} \quad\text{and}\quad \epsilon^2_{\mu\nu} = \frac{1}{\sqrt2}\begin{pmatrix} 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 1\\ 0 & 0 & 1 & 0\end{pmatrix}. \qquad (B.1)$$
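As a cross-check, the short numpy sketch below verifies that the explicit tensors of Eq. (B.1) satisfy conditions (A.1)-(A.4) for a graviton moving along x, and that their contraction with the rest-frame inflaton momentum $l$ vanishes (their 00 entries are zero); the numerical values of $E_\omega$ and $M$ are arbitrary and serve only for the check.

```python
import numpy as np

eta = np.diag([1.0, -1.0, -1.0, -1.0])
s2 = np.sqrt(2.0)

# Polarization tensors of Eq. (B.1) (lower indices), for a graviton moving along x
eps1 = np.zeros((4, 4)); eps1[2, 2], eps1[3, 3] = 1.0/s2, -1.0/s2
eps2 = np.zeros((4, 4)); eps2[2, 3] = eps2[3, 2] = 1.0/s2

E, M = 5.0, 10.0                                   # arbitrary numbers for the check
omega = np.array([E, E, 0.0, 0.0])                 # massless graviton four-momentum
l     = np.array([M, 0.0, 0.0, 0.0])               # inflaton at rest

def raised(t):                                     # epsilon^{mu nu} = eta eta epsilon
    return eta @ t @ eta

for name, eps in (("eps1", eps1), ("eps2", eps2)):
    print(name, "symmetric :", np.allclose(eps, eps.T))              # Eq. (A.1)
    print(name, "transverse:", np.allclose(omega @ eps, 0.0))        # Eq. (A.2)
    print(name, "traceless :", np.isclose(np.sum(eta * eps), 0.0))   # Eq. (A.3)
print("orthonormal:", np.isclose(np.sum(raised(eps1)*eps1), 1.0),
      np.isclose(np.sum(raised(eps1)*eps2), 0.0))                    # Eq. (A.4)
# The 00 entries vanish, so l_mu l_nu eps^{mu nu} = 0 for the rest-frame inflaton;
# this contraction enters the amplitude with graviton emission off the inflaton leg.
print("l.eps.l:", l @ raised(eps1) @ l, l @ raised(eps2) @ l)
```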
Using the Feynman rules of Fig. 4, the amplitudes for the 3-body decays shown in Fig. 1 are

$$i\mathcal{M}_1 = -\frac{i\,\mu}{M_P}\,\frac{l_\mu l_\nu\,\epsilon^{\star\,\mu\nu}_i}{M E_\omega} = 0\,, \qquad (B.2)$$
$$i\mathcal{M}_2 = \frac{i\,\mu}{M_P}\,\frac{p_\mu p_\nu\,\epsilon^{\star\,\mu\nu}_j}{p\cdot\omega}\,, \qquad (B.3)$$
$$i\mathcal{M}_3 = \frac{i\,\mu}{M_P}\,\frac{q_\mu q_\nu\,\epsilon^{\star\,\mu\nu}_k}{M E_\omega - p\cdot\omega}\,, \qquad (B.4)$$
$$i\mathcal{M}_4 \propto \eta_{\mu\nu}\,\epsilon^{\mu\nu} = 0\,, \qquad (B.5)$$

where $\mathcal{M}_{1,2}$ correspond to the diagrams in the upper-left and upper-right panels of Fig. 1, while $\mathcal{M}_{3,4}$ correspond to the lower-left and lower-right panels, respectively. Using Eq. (B.1), we notice that $\mathcal{M}_1 = 0$, as the decay takes place in the rest frame of the inflaton with $l = (M, 0, 0, 0)$, while

$$\sum_{\rm pol}|\mathcal{M}_2|^2 = \frac{\mu^2}{(p\cdot\omega)^2 M_P^2}\sum_j\left(p_\mu p_\nu\,\epsilon^{\star\,\mu\nu}_j\right)\left(p_\mu p_\nu\,\epsilon^{\mu\nu}_j\right) = \frac{\mu^2}{2(p\cdot\omega)^2 M_P^2}\left[\left(p_y^2 - p_z^2\right)^2 + 4p_y^2 p_z^2\right]. \qquad (B.6)$$

Similarly,

$$\sum_{\rm pol}|\mathcal{M}_3|^2 = \frac{\mu^2}{2(M E_\omega - p\cdot\omega)^2 M_P^2}\left[\left((-p_y)^2 - (-p_z)^2\right)^2 + 4p_y^2 p_z^2\right] = \frac{\mu^2}{2(M E_\omega - p\cdot\omega)^2 M_P^2}\left[\left(p_y^2 - p_z^2\right)^2 + 4p_y^2 p_z^2\right], \qquad (B.7)$$

and the cross term turns out to be

$$\sum_{\rm pol}\left(\mathcal{M}_2\mathcal{M}^\star_3\right) = \frac{\mu^2}{2\,(M E_\omega - p\cdot\omega)(p\cdot\omega)\,M_P^2}\left[\left(p_y^2 - p_z^2\right)^2 + 4\,p_y^2\,p_z^2\right]. \qquad (B.8)$$

Note that $\sum_{\rm pol}(\mathcal{M}_1\mathcal{M}^\star_2) = 0$ and $\sum_{\rm pol}(\mathcal{M}_1\mathcal{M}^\star_3) = 0$, as $\mathcal{M}_1 = 0$. The total squared amplitude is then

$$\sum_{\rm pol}|\mathcal{M}|^2 = \frac{\mu^2}{2 M_P^2}\left[\frac{1}{p\cdot\omega} + \frac{1}{M E_\omega - p\cdot\omega}\right]^2\left(p_y^2 + p_z^2\right)^2. \qquad (B.9)$$

Since $p\cdot\omega = E_p E_\omega - p_x E_\omega$, we have

$$p_x = \frac{E_p E_\omega - p\cdot\omega}{E_\omega}\,, \qquad (B.10)$$
which, together with $m^2 \equiv p^2 = E_p^2 - (p_x^2 + p_y^2 + p_z^2)$, implies

$$\sum_{\rm pol}|\mathcal{M}|^2 = \frac{\mu^2}{2M_P^2}\left[\frac{1}{p\cdot\omega} + \frac{1}{M E_\omega - p\cdot\omega}\right]^2\left[\frac{2E_p}{E_\omega}\,p\cdot\omega - \left(\frac{p\cdot\omega}{E_\omega}\right)^2 - m^2\right]^2 = \frac{\mu^2\left[4E_\omega^2 m^2 - 8E_p E_\omega M(E_\omega + E_p) + 4M^2\left(E_p^2 + 3E_p E_\omega + E_\omega^2\right) - 4(E_\omega + E_p)M^3 + M^4\right]^2}{2 M_P^2\, E_\omega^2\, M^2\,(M - 2E_p)^2\left[M - 2(E_p + E_\omega)\right]^2}\,. \qquad (B.11)$$

Finally, utilizing

$$\frac{d\Gamma}{dE_\omega} = \frac{1}{(2\pi)^3}\,\frac{1}{8M}\int_{E_{p,\rm min}}^{E_{p,\rm max}} dE_p\,|\mathcal{M}|^2, \qquad (B.12)$$

with

$$E_{p,\rm max} = \frac12\left[M - E_\omega + E_\omega\sqrt{\frac{M^2 - 2M E_\omega - 4m^2}{M(M - 2E_\omega)}}\right], \qquad (B.13)$$
$$E_{p,\rm min} = \frac12\left[M - E_\omega - E_\omega\sqrt{\frac{M^2 - 2M E_\omega - 4m^2}{M(M - 2E_\omega)}}\right], \qquad (B.14)$$

one obtains the total differential decay rate

$$\frac{d\Gamma^{(1)}_0}{dE_\omega} = \frac{2}{64\pi^3}\left(\frac{\mu}{M_P}\right)^2\left[\frac{(2x-1)(2x - 2y^2 - 1)}{4x}\,\alpha + \frac{y^2(y^2 + 2x - 1)}{x}\,\log\frac{1+\alpha}{1-\alpha}\right] \qquad (B.15)$$

for inflaton decays into complex scalars. Note that the extra factor of 2 accounts for the two possible decay channels of the inflaton.

B.1.2 Polarization Sum

We now employ the second method of calculation, namely the tensor polarization sum of Eq. (A.5). The squared amplitudes are

$$\sum_{\rm pol}|\mathcal{M}_1|^2 = \frac{\mu^2}{M^2E_\omega^2 M_P^2}\,l_\mu l_\nu l_\alpha l_\beta\sum_{\rm pol}\epsilon^{\star\,\mu\nu}\epsilon^{\alpha\beta} = \frac{\mu^2}{2M^2E_\omega^2 M_P^2}\,l_\mu l_\nu l_\alpha l_\beta\left(\hat\eta^{\mu\alpha}\hat\eta^{\nu\beta} + \hat\eta^{\mu\beta}\hat\eta^{\nu\alpha} - \hat\eta^{\mu\nu}\hat\eta^{\alpha\beta}\right) = \frac{\mu^2}{2M^2E_\omega^2 M_P^2}\left[l^2 l^2 - \frac{4\,l^2\,(l\cdot\omega)(l\cdot\bar\omega)}{2E_\omega^2} + \frac{4\,(l\cdot\omega)^2(l\cdot\bar\omega)^2}{4E_\omega^4}\right] = \frac{\mu^2}{2M^2E_\omega^2 M_P^2}\left[l^2 - \frac{(l\cdot\omega)(l\cdot\bar\omega)}{E_\omega^2}\right]^2 = \frac{\mu^2}{2M^2E_\omega^2 M_P^2}\left[M^2 - M^2\right]^2 = 0\,, \qquad (B.16)$$

$$\sum_{\rm pol}|\mathcal{M}_2|^2 = \frac{\mu^2}{M_P^2}\,\frac{p_\mu p_\nu p_\alpha p_\beta}{(p\cdot\omega)^2}\sum_{\rm pol}\epsilon^{\star\,\mu\nu}\epsilon^{\alpha\beta} = \frac{\mu^2}{2(p\cdot\omega)^2 M_P^2}\left[p^2 - \frac{(p\cdot\omega)(p\cdot\bar\omega)}{E_\omega^2}\right]^2 = \frac{\mu^2}{2(p\cdot\omega)^2 M_P^2}\left[m^2 - \frac{(p\cdot\omega)(p\cdot\bar\omega)}{E_\omega^2}\right]^2, \qquad (B.17)$$
$$\sum_{\rm pol}|\mathcal{M}_3|^2 = \frac{\mu^2}{2(M E_\omega - p\cdot\omega)^2 M_P^2}\left[q^2 - \frac{(q\cdot\omega)(q\cdot\bar\omega)}{E_\omega^2}\right]^2 = \frac{\mu^2}{2(M E_\omega - p\cdot\omega)^2 M_P^2}\left[m^2 - \frac{(q\cdot\omega)(q\cdot\bar\omega)}{E_\omega^2}\right]^2, \qquad (B.18)$$

$$\sum_{\rm pol}\left(\mathcal{M}_2\mathcal{M}^\star_3\right) = \frac{\mu^2}{(p\cdot\omega)(M E_\omega - p\cdot\omega) M_P^2}\,p_\mu p_\nu q_\alpha q_\beta\sum_{\rm pol}\epsilon^{\star\,\mu\nu}\epsilon^{\alpha\beta} = \frac{\mu^2}{2(p\cdot\omega)(M E_\omega - p\cdot\omega)M_P^2}\Bigg\{2\left[(p\cdot q) - \frac{(p\cdot\omega)(q\cdot\bar\omega) + (p\cdot\bar\omega)(q\cdot\omega)}{2E_\omega^2}\right]^2 - \left[p^2 q^2 - \frac{p^2(q\cdot\omega)(q\cdot\bar\omega) + q^2(p\cdot\omega)(p\cdot\bar\omega)}{E_\omega^2} + \frac{(p\cdot\omega)(p\cdot\bar\omega)(q\cdot\omega)(q\cdot\bar\omega)}{E_\omega^4}\right]\Bigg\} = \frac{\mu^2}{2(p\cdot\omega)(M E_\omega - p\cdot\omega)M_P^2}\Bigg\{2\left[(p\cdot q) - \frac{(p\cdot\omega)(q\cdot\bar\omega) + (p\cdot\bar\omega)(q\cdot\omega)}{2E_\omega^2}\right]^2 - \left[p^2 - \frac{(q\cdot\omega)(q\cdot\bar\omega)}{E_\omega^2}\right]\left[q^2 - \frac{(p\cdot\omega)(p\cdot\bar\omega)}{E_\omega^2}\right]\Bigg\}\,. \qquad (B.19)$$

Note that Eq. (B.16) implies $\mathcal{M}_1 = 0$, in agreement with Eq. (B.2); therefore the other two interference terms also vanish, $\sum_{\rm pol}(\mathcal{M}_1\mathcal{M}^\star_2) = \sum_{\rm pol}(\mathcal{M}_1\mathcal{M}^\star_3) = 0$. Using the explicit four-vectors, one finds

$$p\cdot\omega = M\left(E_\omega + E_p - \frac{M}{2}\right), \qquad (B.20)$$
$$p\cdot\bar\omega = 2E_p E_\omega - p\cdot\omega\,, \qquad (B.21)$$
$$q\cdot\omega = M E_\omega - p\cdot\omega\,, \qquad (B.22)$$
$$q\cdot\bar\omega = (l - p - \omega)\cdot\bar\omega = M E_\omega - p\cdot\bar\omega - 2E_\omega^2 = M E_\omega - 2E_p E_\omega + p\cdot\omega - 2E_\omega^2\,, \qquad (B.23)$$
$$p\cdot q = \frac12\left[(p+q)^2 - 2m^2\right] = \frac12\left[(l-\omega)^2 - 2m^2\right] = \frac12\left[M^2 - 2M E_\omega - 2m^2\right]. \qquad (B.24)$$

With these relations, one obtains

$$\sum_{\rm pol}|\mathcal{M}|^2 = \frac{\mu^2\left[4E_\omega^2 m^2 - 8E_p E_\omega M(E_\omega + E_p) + 4M^2\left(E_p^2 + 3E_p E_\omega + E_\omega^2\right) - 4(E_\omega + E_p)M^3 + M^4\right]^2}{2M_P^2 E_\omega^2 M^2 (M - 2E_p)^2\left[M - 2(E_p + E_\omega)\right]^2}\,, \qquad (B.25)$$
and further

$$\frac{d\Gamma^{(1)}_0}{dE_\omega} = \frac{\mu^2}{32\pi^3 M_P^2}\left[\frac{(2x-1)(2x - 2y^2 - 1)}{4x}\,\alpha + \frac{y^2(y^2 + 2x - 1)}{x}\,\log\frac{1+\alpha}{1-\alpha}\right], \qquad (B.26)$$

which agrees with Eq. (B.15).

B.2 Fermionic Case

Using the Feynman rules listed in Fig. 4, the amplitudes for the inflaton decay into a pair of Dirac fermions turn out to be

$$i\mathcal{M}_1 = -\frac{i\,y_\psi}{M_P}\,\frac{l_\mu l_\nu\,\epsilon^{\star\,\mu\nu}}{M E_\omega}\,\bar u(p)\,v(q)\,, \qquad (B.27)$$
$$i\mathcal{M}_2 = \frac{i\,y_\psi}{2\,p\cdot\omega\,M_P}\left[\bar u(p)\,(p_\mu\gamma_\nu)\,(\slashed l + 2m)\,v(q)\right]\epsilon^{\star\,\mu\nu}, \qquad (B.28)$$
$$i\mathcal{M}_3 = \frac{i\,y_\psi}{2\,(M E_\omega - p\cdot\omega)\,M_P}\left[\bar u(p)\,(\slashed l - 2m)\,(q_\mu\gamma_\nu)\,v(q)\right]\epsilon^{\star\,\mu\nu}, \qquad (B.29)$$
$$i\mathcal{M}_4 \propto \eta_{\mu\nu}\,\epsilon^{\star\,\mu\nu} = 0\,, \qquad (B.30)$$

Figure 5. Feynman diagrams for inflaton decay into a pair of Dirac fermions with graviton emission.

where $\mathcal{M}_1$, $\mathcal{M}_2$, $\mathcal{M}_3$, and $\mathcal{M}_4$ correspond to the diagrams from left to right in Fig. 5. Note that

$$\sum_{\rm spin,\,pol}|\mathcal{M}_1|^2 = \frac{y_\psi^2}{M^2E_\omega^2 M_P^2}\,l_\mu l_\nu l_\alpha l_\beta\sum_{\rm pol}\epsilon^{\star\,\mu\nu}\epsilon^{\alpha\beta}\times{\rm Tr}\left[(\slashed q - m)(\slashed p + m)\right] = {\rm Eq.~(B.16)}\times\frac{y_\psi^2}{\mu^2}\times{\rm Tr}\left[(\slashed q - m)(\slashed p + m)\right] = 0\,, \qquad (B.31)$$

which again implies that $\mathcal{M}_1 = 0$; in fact, since $\mathcal{M}_1$ is proportional to Eq. (B.2), one immediately finds that it vanishes.
B.2 Fermionic Case

Using the list of Feynman rules in Fig. 4, the amplitudes for the inflaton decay into a Dirac fermion pair turn out to be
\[
i\mathcal{M}_1 = -\,\frac{i\,y_\psi}{M_P}\,\frac{l^\mu l^\nu \epsilon^\star_{\mu\nu}}{M E_\omega}\,\bar u(p)\,v(q)\,, \tag{B.27}
\]
\[
i\mathcal{M}_2 = \frac{i\,y_\psi}{2\,p\cdot\omega\, M_P}\left[\bar u(p)\,(p^\mu\gamma^\nu)(\slashed{l} + 2m)\,v(q)\right]\epsilon^{*}_{\mu\nu}\,, \tag{B.28}
\]
\[
i\mathcal{M}_3 = \frac{i\,y_\psi}{2\,(M E_\omega - p\cdot\omega)\, M_P}\left[\bar u(p)\,(\slashed{l} - 2m)(q^\mu\gamma^\nu)\,v(q)\right]\epsilon^{*}_{\mu\nu}\,, \tag{B.29}
\]
\[
i\mathcal{M}_4 \propto \eta_{\mu\nu}\epsilon^{*\mu\nu} = 0\,, \tag{B.30}
\]
[Figure 5: Feynman graph for inflaton decay into a pair of Dirac fermions with graviton emission.]
where $\mathcal{M}_1$, $\mathcal{M}_2$, $\mathcal{M}_3$ and $\mathcal{M}_4$ correspond to the diagrams from left to right in Fig. 5. Note that
\[
\sum_{\rm spin,\,pol} |\mathcal{M}_1|^2 = \frac{y_\psi^2}{M^2 E_\omega^2 M_P^2}\, l^\mu l^\nu l^\alpha l^\beta \sum_{\rm pol}\epsilon^\star_{\mu\nu}\epsilon_{\alpha\beta}\times {\rm Tr}\left[(\slashed{q} - m)(\slashed{p} + m)\right] = {\rm Eq.~(B.16)}\times \frac{y_\psi^2}{\mu^2}\times {\rm Tr}\left[(\slashed{q} - m)(\slashed{p} + m)\right] = 0\,,
\tag{B.31}
\]
which again implies that $\mathcal{M}_1 = 0$. In fact, since $\mathcal{M}_1 \propto$ Eq. (B.2), one immediately finds that it vanishes. The other squared amplitudes are given by
\[
\sum_{\rm spin,\,pol} |\mathcal{M}_2|^2 = \frac{y_\psi^2}{(2\,p\cdot\omega)^2 M_P^2}\sum_{\rm pol}\epsilon^\star_{\mu\nu}\epsilon_{\alpha\beta}\times{\rm Tr}\left[p^\mu\gamma^\nu(\slashed{l}+2m)(\slashed{q}-m)(\slashed{l}+2m)\gamma^\alpha p^\beta(\slashed{p}+m)\right],
\tag{B.32}
\]
\[
\sum_{\rm spin,\,pol} |\mathcal{M}_3|^2 = \frac{y_\psi^2}{4\,(M E_\omega - p\cdot\omega)^2 M_P^2}\sum_{\rm pol}\epsilon^\star_{\mu\nu}\epsilon_{\alpha\beta}\times{\rm Tr}\left[(\slashed{l}-2m)\,q^\mu\gamma^\nu(\slashed{q}-m)\gamma^\alpha q^\beta(\slashed{l}-2m)(\slashed{p}+m)\right],
\tag{B.33}
\]
\[
\sum_{\rm spin,\,pol} \left(\mathcal{M}_2\mathcal{M}_3^*\right) = \frac{y_\psi^2}{4\,p\cdot\omega\,(M E_\omega - p\cdot\omega)\, M_P^2}\sum_{\rm pol}\epsilon^\star_{\mu\nu}\epsilon_{\alpha\beta}\times{\rm Tr}\left[p^\mu\gamma^\nu(\slashed{l}+2m)(\slashed{q}-m)\gamma^\alpha q^\beta(\slashed{l}-2m)(\slashed{p}+m)\right].
\tag{B.34}
\]
As before, the other interference terms $\sum_{\rm pol}(\mathcal{M}_1\mathcal{M}_2^\star) = \sum_{\rm pol}(\mathcal{M}_1\mathcal{M}_3^\star) = 0$. The total matrix element squared turns out to be
\[
\begin{aligned}
\sum_{\rm spin,\,pol} |\mathcal{M}|^2 = {}& y_\psi^2\;\frac{M(M-2E_p)(M-2E_\omega)\left[2(E_p+E_\omega)-M\right] + 4E_\omega^2 m^2}{E_\omega^2 M^2 (M-2E_p)^2\left[M-2(E_p+E_\omega)\right]^2 M_P^2} \\
&\times\Big\{ 4Mm^2\left[M\left(4E_p^2+12E_pE_\omega+3E_\omega^2\right) - 4M^2(E_p+E_\omega) - 8E_pE_\omega(E_p+E_\omega) + M^3\right] \\
&\qquad - M^2(M-2E_p)\left(2E_\omega^2 - 2E_\omega M + M^2\right)\left[M-2(E_p+E_\omega)\right] + 16E_\omega^2 m^4\Big\}\,,
\end{aligned}
\tag{B.35}
\]
[Figure 6: Feynman diagrams for inflaton decay into a pair of massive vectors with graviton emission.]
with which we find
\[
\frac{{\rm d}\Gamma^{(1)}_{1/2}}{{\rm d}E_\omega} = \frac{y_\psi^2 M^2}{64\,\pi^3 M_P^2}\left[\frac{(1-2x)}{x}\,\alpha^{-1}\left(8xy^2 + 2(x-1)x - 8y^4 - 2y^2 + 1\right) + \frac{4y^2\left[(5-8x)y^2 - (x-1)^2 - 4y^4\right]}{x}\,\log\!\left(\frac{1+\alpha}{1-\alpha}\right)\right].
\tag{B.36}
\]
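The spin and polarization sums in (B.31)–(B.35) reduce to standard Dirac traces; for instance, the factor ${\rm Tr}[(\slashed{q}-m)(\slashed{p}+m)]$ appearing in (B.31) equals $4(p\cdot q - m^2)$. The snippet below is a generic numerical check of that identity with explicit Dirac-representation gamma matrices; the test momenta are arbitrary on-shell values and are not tied to the kinematics of the paper.

```python
import numpy as np

# Gamma matrices in the Dirac representation; metric signature (+,-,-,-)
I2, Z2 = np.eye(2), np.zeros((2, 2))
sigma = [np.array([[0, 1], [1, 0]], dtype=complex),
         np.array([[0, -1j], [1j, 0]]),
         np.array([[1, 0], [0, -1]], dtype=complex)]
gamma = [np.block([[I2, Z2], [Z2, -I2]]).astype(complex)] + \
        [np.block([[Z2, s], [-s, Z2]]) for s in sigma]
g = np.diag([1.0, -1.0, -1.0, -1.0])

def slash(p):
    """Feynman slash p_mu gamma^mu (index lowered with the metric g)."""
    p_lo = g @ p
    return sum(p_lo[mu] * gamma[mu] for mu in range(4))

def mdot(a, b):
    return a @ g @ b

m = 0.1                                               # arbitrary test mass
p = np.array([np.sqrt(m**2 + 0.09), 0.0, 0.0, 0.3])   # on shell: p^2 = m^2
q = np.array([np.sqrt(m**2 + 0.04), 0.0, 0.2, 0.0])   # on shell: q^2 = m^2

lhs = np.trace((slash(q) - m*np.eye(4)) @ (slash(p) + m*np.eye(4)))
print(lhs.real, 4 * (mdot(p, q) - m**2))              # the two numbers coincide
```

The longer traces in (B.32)–(B.34) can be checked the same way, or generated with a symbolic tool, before contracting with the graviton polarization sum.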
B.3 Vector Case

For the amplitudes of inflaton decay into massive spin-1 final states, we obtain
\[
i\mathcal{M}_1 = -\frac{i}{2}\,\frac{g_V}{M_P}\,\frac{l^\mu l^\nu \epsilon^\star_{\mu\nu}}{M E_\omega}\;\eta_{\mu'\nu'}\,\varepsilon^{*\mu'}(p,\lambda)\,\varepsilon^{*\nu'}(q,\lambda)\,, \tag{B.37}
\]
and similarly, we find
\[
\begin{aligned}
i\mathcal{M}_2 = -\frac{i\,g_V}{p\cdot\omega\, M_P}\;&\epsilon^{*\mu\nu}\,\varepsilon^{*\mu'}(p,\lambda)\,\varepsilon^{*\nu'}(q,\lambda)\cdot\eta_{\sigma\nu'}\left(\eta_{\rho\sigma} - \frac{p'_\rho\, p'_\sigma}{m^2}\right) \\
\times\Big[&\eta_{\mu\nu}\eta_{\rho\mu'}\left(p'\cdot p - m^2\right) - \eta_{\mu\nu}p'_{\mu'}p_{\rho} + \eta_{\nu\rho}p'_{\mu'}p_{\mu} - \eta_{\rho\mu'}p'_{\nu}p_{\mu} + \eta_{\mu\mu'}p'_{\nu}p_{\rho} - \eta_{\nu\rho}\eta_{\mu\mu'}\left(p'\cdot p - m^2\right) \\
&+ \eta_{\nu\mu'}p'_{\mu}p_{\rho} - \eta_{\rho\mu'}p'_{\mu}p_{\nu} + \eta_{\mu\rho}p'_{\mu'}p_{\nu} - \eta_{\nu\mu'}\eta_{\mu\rho}\left(p'\cdot p - m^2\right)\Big]\,,
\end{aligned}
\tag{B.38}
\]
\[
\begin{aligned}
i\mathcal{M}_3 = -\frac{i\,g_V}{(M E_\omega - p\cdot\omega)\, M_P}\;&\epsilon^{*\mu\nu}\,\varepsilon^{*\mu'}(p,\lambda)\,\varepsilon^{*\nu'}(q,\lambda)\cdot\eta_{\sigma\mu'}\left(\eta_{\rho\sigma} - \frac{q'_\rho\, q'_\sigma}{m^2}\right) \\
\times\Big[&\eta_{\mu\nu}\eta_{\rho\nu'}\left(q'\cdot q - m^2\right) - \eta_{\mu\nu}q'_{\nu'}q_{\rho} + \eta_{\nu\rho}q'_{\nu'}q_{\mu} - \eta_{\rho\nu'}q'_{\nu}q_{\mu} + \eta_{\mu\nu'}q'_{\nu}q_{\rho} - \eta_{\nu\rho}\eta_{\mu\nu'}\left(q'\cdot q - m^2\right) \\
&+ \eta_{\nu\nu'}q'_{\mu}q_{\rho} - \eta_{\rho\nu'}q'_{\mu}q_{\nu} + \eta_{\mu\rho}q'_{\nu'}q_{\nu} - \eta_{\nu\nu'}\eta_{\mu\rho}\left(q'\cdot q - m^2\right)\Big]\,,
\end{aligned}
\tag{B.39}
\]
while
\[
\mathcal{M}_4 \propto \eta_{\mu\nu}\epsilon^{*\mu\nu} = 0\,, \tag{B.40}
\]
for the diagrams from left to right in Fig. 6, respectively. The total squared matrix element is given by
\[
\sum_{\rm spin,\,pol} |\mathcal{M}|^2 = \frac{g_V^2}{2\,E_\omega^4\, M^2\, m^4\,(M-2E_p)^2(M-2E_p-2E_\omega)^2\, M_P^2}\times\sum_{k=0}^{8} A_k\, E_p^k\,,
\tag{B.41}
\]
where
\[
A_8 = 256\,M^4(M-2E_\omega)^2\,, \tag{B.42}
\]
\[
A_7 = 1024\,(E_\omega - M)M^4(M-2E_\omega)^2\,, \tag{B.43}
\]
\[
A_6 = 128\,(2E_\omega - M)M^3\left[-2E_\omega(M-4E_\omega)m^2 - M(M-2E_\omega)\left(12E_\omega^2 - 27ME_\omega + 14M^2\right)\right], \tag{B.44}
\]
\[
A_5 = 128\,M^3\left(2E_\omega^2 - 3ME_\omega + M^2\right)\left[-6E_\omega(M-4E_\omega)m^2 - M(M-2E_\omega)\left(8E_\omega^2 - 25ME_\omega + 14M^2\right)\right], \tag{B.45}
\]
\[
\begin{aligned}
A_4 = 16\,M^3\Big[&4E_\omega^2(3M-4E_\omega)m^4 + 4E_\omega(M-2E_\omega)\left(-60E_\omega^3 + 138ME_\omega^2 - 91M^2E_\omega + 15M^3\right)m^2 \\
&+ M(M-2E_\omega)^2\left(16E_\omega^4 - 136ME_\omega^3 + 301M^2E_\omega^2 - 250M^3E_\omega + 70M^4\right)\Big]\,,
\end{aligned}
\tag{B.46}
\]
\[
\begin{aligned}
A_3 = -32\,(E_\omega - M)M^3\Big[&4E_\omega^2(4E_\omega-3M)m^4 - 4E_\omega(2E_\omega-M)\left(20E_\omega^3 - 48ME_\omega^2 + 31M^2E_\omega - 5M^3\right)m^2 \\
&+ M^2(M-2E_\omega)^2\left(12E_\omega^3 - 45ME_\omega^2 + 46M^2E_\omega - 14M^3\right)\Big]\,,
\end{aligned}
\tag{B.47}
\]
\[
\begin{aligned}
A_2 = 8\,M\Big[&M^5(M-2E_\omega)(M-E_\omega)\left(-26E_\omega^3 + 68ME_\omega^2 - 55M^2E_\omega + 14M^3\right) \\
&+ 2E_\omega M^2(M-2E_\omega)\left(-56E_\omega^5 + 264ME_\omega^4 - 458M^2E_\omega^3 + 360M^3E_\omega^2 - 126M^4E_\omega + 15M^5\right)m^2 \\
&+ 16E_\omega^4(3M-4E_\omega)m^6 + 4E_\omega^2 M\left(-24E_\omega^4 + 4ME_\omega^3 + 26M^2E_\omega^2 - 30M^3E_\omega + 9M^4\right)m^4\Big]\,,
\end{aligned}
\tag{B.48}
\]
\[
\begin{aligned}
A_1 = 8\,(E_\omega - M)M\Big[&(2M-3E_\omega)(E_\omega-M)(2E_\omega-M)^3M^6 \\
&+ 2E_\omega(2E_\omega-3M)(M-2E_\omega)^2\left(2E_\omega^3 - 8ME_\omega^2 + 6M^2E_\omega - M^3\right)m^2M^2 \\
&+ 4E_\omega^2\left(-24E_\omega^4 + 12ME_\omega^3 + 4M^2E_\omega^2 - 10M^3E_\omega + 3M^4\right)m^4M + 16E_\omega^4(3M-4E_\omega)m^6\Big]\,,
\end{aligned}
\tag{B.49}
\]
\[
\begin{aligned}
A_0 = {}&192\,E_\omega^6 m^8 + 32\,E_\omega^4 M^2\left(6E_\omega^2 - 10ME_\omega + 3M^2\right)m^6 + (E_\omega-M)^2 M^8(M-2E_\omega)^4 \\
&- 4\,E_\omega^2 M^2\left(2E_\omega^2 - 2ME_\omega + M^2\right)\left(16E_\omega^4 - 32ME_\omega^3 + 6M^2E_\omega^2 + 10M^3E_\omega - 3M^4\right)m^4 \\
&- 4\,E_\omega M^4(M-2E_\omega)^4\left(2E_\omega^3 - 4ME_\omega^2 + 5M^2E_\omega - M^3\right)m^2\,,
\end{aligned}
\tag{B.50}
\]
with which one can show that the differential decay rate reads
\[
\begin{aligned}
\frac{{\rm d}\Gamma^{(1)}_{1}}{{\rm d}E_\omega} = \frac{g_V^2}{1920\,\pi^3}\,\frac{x}{y^4 M_P^2}\bigg\{&60y^2\left[-(1-2x)^2(1+2x) + \left(5+4x(-3+4x)\right)y^2 + 16(x-1)y^4 + 12y^6\right]\log\!\left(\frac{1+\alpha}{1-\alpha}\right) \\
&+ \alpha\Big[360(1-2x)y^6 + 4\big(4x(23x-5)+15\big)y^4 + 2(2x-1)\big(28x(14x-5)+15\big)y^2 \\
&\qquad\;\; + (1-2x)^2\big(4x(2x-5)+15\big)\Big]\bigg\}\,.
\end{aligned}
\tag{B.51}
\]
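The rank-two structures $\eta_{\rho\sigma} - p'_\rho p'_\sigma/m^2$ and $\eta_{\rho\sigma} - q'_\rho q'_\sigma/m^2$ entering (B.38)–(B.39), and the sums over $\lambda$ implicit in (B.41), rest on the usual completeness relation for a massive spin-1 field, $\sum_\lambda \varepsilon^{*\mu}(k,\lambda)\,\varepsilon^{\nu}(k,\lambda) = -\eta^{\mu\nu} + k^\mu k^\nu/m^2$. The sketch below verifies that relation numerically for an arbitrary on-shell momentum; it is generic field-theory bookkeeping rather than code from the paper.

```python
import numpy as np

g = np.diag([1.0, -1.0, -1.0, -1.0])   # metric; eta^{mu nu} has the same entries

m = 0.25
k3 = np.array([0.3, -0.1, 0.4])                        # arbitrary spatial momentum
E = np.sqrt(m**2 + k3 @ k3)
k = np.concatenate(([E], k3))                          # on shell: k^2 = m^2

# Two transverse polarizations orthogonal to k3, plus the longitudinal one
n1 = np.cross(k3, [0.0, 0.0, 1.0]); n1 /= np.linalg.norm(n1)
n2 = np.cross(k3, n1);              n2 /= np.linalg.norm(n2)
eps_T1 = np.concatenate(([0.0], n1))
eps_T2 = np.concatenate(([0.0], n2))
eps_L  = np.concatenate(([np.linalg.norm(k3)], E * k3 / np.linalg.norm(k3))) / m

# Sum over the three physical polarization states (real vectors, contravariant)
pol_sum = sum(np.outer(e, e) for e in (eps_T1, eps_T2, eps_L))

# Completeness: sum_lambda eps^mu eps^nu = -eta^{mu nu} + k^mu k^nu / m^2
target = -g + np.outer(k, k) / m**2
print(np.max(np.abs(pol_sum - target)))                # ~1e-15
```

Folding these massive-vector sums into the graviton polarization sum is what produces the polynomial-in-$E_p$ structure collected in (B.41)–(B.50).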
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Shaddock, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Folkner and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Phinney, Laser interferometry for the big bang observer, Class.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Quant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Grav.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' 23 (2006) 4887.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' [60] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='-Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Li, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='-X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Tang and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='-P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Shi, Electromagnetic response of a Gaussian beam to high frequency relic gravitational waves in quintessential inflationary models, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' D 67 (2003) 104008 [gr-qc/0306092].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' [61] E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Armengaud et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=', Conceptual Design of the International Axion Observatory (IAXO), JINST 9 (2014) T05002 [1401.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='3233].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' [62] IAXO collaboration, Physics potential of the International Axion Observatory (IAXO), JCAP 06 (2019) 047 [1904.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='09155].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' [63] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Gross and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Jackiw, Low-Energy Theorem for Graviton Scattering, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' 166 (1968) 1287.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' [64] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Gleisberg, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Krauss, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Matchev, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Schalicke, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Schumann and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Soff, Helicity formalism for spin-2 particles, JHEP 09 (2003) 001 [hep-ph/0306182].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' – 21 – [65] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' de Aquino, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Hagiwara, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Li and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' Maltoni, Simulating graviton production at hadron colliders, JHEP 06 (2011) 132 [1101.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='5499].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' [66] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content=' van Dam and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/oNFIT4oBgHgl3EQfuyvd/content/2301.11345v1.pdf'} +page_content='J.' 
diff --git a/qNE0T4oBgHgl3EQfqwHK/content/2301.02558v1.pdf b/qNE0T4oBgHgl3EQfqwHK/content/2301.02558v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..24162b4d50dd757ca8154afa1a1d987201abb582
--- /dev/null
+++ b/qNE0T4oBgHgl3EQfqwHK/content/2301.02558v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:593a28acdeb319cc057752f13afc9969b5e2a8c3804785ee8609359f4299e343
+size 5288462
diff --git a/qNE0T4oBgHgl3EQfqwHK/vector_store/index.faiss b/qNE0T4oBgHgl3EQfqwHK/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..779e81119f72a01200541c552acb1531284eb75b
--- /dev/null
+++ b/qNE0T4oBgHgl3EQfqwHK/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16ec9c5d4df1bb9694caf73bebfac1b5bfa9ec957bf2269d2821bd4bf5259550
+size 5177389
diff --git a/qNE0T4oBgHgl3EQfqwHK/vector_store/index.pkl b/qNE0T4oBgHgl3EQfqwHK/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..662b304699caba822296b52c69bcf00bc53f4018
--- /dev/null
+++ b/qNE0T4oBgHgl3EQfqwHK/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53dcab4d1e91412e03fe9fe1e9841579edf89b0ef4c273404c9582f5284306b6
+size 183536
diff --git a/r9FIT4oBgHgl3EQfySu_/content/tmp_files/2301.11360v1.pdf.txt b/r9FIT4oBgHgl3EQfySu_/content/tmp_files/2301.11360v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ad43f53fe0cd4a0d3515e06c7049e3fd00b83d9e
--- /dev/null
+++ b/r9FIT4oBgHgl3EQfySu_/content/tmp_files/2301.11360v1.pdf.txt
@@ -0,0 +1,1762 @@
Rethinking 1×1 Convolutions: Can we train CNNs with Frozen Random Filters?

Paul Gavrikov 1 Janis Keuper 1 2

Abstract

Modern CNNs are learning the weights of vast numbers of convolutional operators. In this paper, we raise the fundamental question if this is actually necessary. We show that even in the extreme case of only randomly initializing and never updating spatial filters, certain CNN architectures can be trained to surpass the accuracy of standard training. By reinterpreting the notion of pointwise (1 × 1) convolutions as an operator to learn linear combinations (LC) of frozen (random) spatial filters, we are able to analyze these effects and propose a generic LC convolution block that allows tuning of the linear combination rate. Empirically, we show that this approach not only allows us to reach high test accuracies on CIFAR and ImageNet but also has favorable properties regarding model robustness, generalization, sparsity, and the total number of necessary weights.
Additionally, +we propose a novel weight sharing mechanism, +which allows sharing of a single weight tensor be- +tween all spatial convolution layers to massively +reduce the number of weights. +Code: https://after-accept.com. +1. Introduction +Convolutional Neural Networks (CNN) are building the +backbone of state-of-the-art neural architectures in a wide +range of learning applications on n-dimensional array data, +such as standard computer vision problems like 2D image +classification, semantic segmentation, or scene understand- +ing. In order to solve these tasks, modern CNN architectures +are learning the entries (=weights) of millions of convolu- +tional filter kernels. This process is not only very compute +and data intensive, but apparently also mostly redundant +as CNNs are learning kernels that are bound to the same +distribution, even when training different architectures on +*Equal contribution 1IMLA, Offenburg University, Offenburg, +Germany 2Fraunhofer ITWM, Kaiserslautern, Germany. Corre- +spondence to: Paul Gavrikov . +Copyright 2023 by the author(s). +1 +2 +4 +8 +16 +32 +64 +128 +84 +85 +86 +87 +88 +89 +90 +91 +92 +Spatial Convolutions +Frozen Random +Learnable +LC Expansion +Validation Accuracy [%] +Figure 1. Validation +accuracy +of +LCResNet-20-16x{E} +on +CIFAR-10 with • frozen random or • learnable spatial convo- +lutions under increasing LC expansion {E} in the LC-Blocks. +The size of the marker indicates the variance in the validation +accuracy over several runs. +different datasets for different tasks (Gavrikov & Keuper, +2022a). Yet if - in oversimplified terms - all CNNs are +learning the “same” filters, one could raise the fundamental +question if we actually need to learn them at all. +In order to investigate if and how the training of a CNN +with non-learnable filters is possible, we retreat to a setup +that eliminates any possible bias in the choice of the filters: +we simply set random filters. This is not only practically +feasible since random initializations of kernel weights are +part of the standard training procedure, but also theoreti- +cally justified by a long line of prior work investigating the +utilization of random feature extraction (e.g. see (Rahimi +& Recht, 2007) for a prominent example) prior to the deep +learning era. +Another cornerstone of our analysis is the so-called point- +wise (1 × 1) convolution, which is increasingly used in +modern CNNs. Despite its name and similarities in the +implementation details, we will argue that this learnable op- +erator differs significantly from spatial k × k convolutions +and learns linear combinations of non-learnable (random) +spatial filters. +By applying only minor changes to common CNN archi- +tectures, we show that networks are able to learn how to +combine non-learnable, randomly initialized spatial filters +arXiv:2301.11360v1 [cs.CV] 26 Jan 2023 + +Rethinking 1×1 Convolutions: Can we train CNNs with Frozen Random Filters? +for the extraction of meaningful features. +We summarize our key contributions as follows: +• We show empirically, that a certain type of randomly +initialized CNNs (with specific 1 × 1 configurations) +can be trained to high validation accuracies on 2D +image classification tasks without the need to learn the +weights of spatial convolution filters. +• Based on this finding, we introduce a novel convolution +block, computing learnable linear combinations (LC) +of (frozen random) filters. 
Using the resulting LCRes- +Nets, we are investigating the properties of networks +that are limited to using random spatial filters. +• Our empirical results not only show that LCResNets +with frozen random spatial convolutions and high LC +rates are able to outperform their conventionally trained +counterparts, but we also show favorable properties of +linear combined filters in terms of robustness, sparsity, +and model size. +• Further, we introduce novel weight sharing methods, +which allow the re-usage of the same random weights +in all layers, massively reducing the number of weights +in CNN architectures. +2. Related Work +Random Model Parameters. +Modern neural network +weights are commonly initialized with values drawn i.i.d. +from uniform or normal distributions. To improve the gra- +dient flow (Hochreiter, 1991; Kolen & Kremer, 2001), the +standard deviation is adjusted according to the channel fan, +based on proposed heuristics by (He et al., 2015a; Glorot & +Bengio, 2010). +Rudi & Rosasco (2017) provide an analysis of generaliza- +tion properties of random features and conclude that many +problems exist, where exploiting random features can reach +significant accuracy, at a significant reduction in computa- +tion cost. +Based on the Lottery Ticket Hypothesis (LTH) (Frankle & +Carbin, 2019) observations that deep neural network can be +trained with extremely small parameter subsets to the same +accuracy both Zhou et al. (2019); Ramanujan et al. (2019) +propose methods that prune weights of randomly initialized +CNNs that achieve good (albeit well beyond trained) perfor- +mance on ImageNet. Both approaches rely on unstructured +weight pruning. +Frankle et al. (2021) study freezing all network parameters +during training except the β and γ parameters of Batch- +Normalization layers (Ioffe & Szegedy, 2015) and reveal +that models are still able to learn highly non-trivial perfor- +mances, only via affine transformations of features. This is +somewhat orthogonal to our research. However, we study +linear combinations in weight space instead and obtain sig- +nificantly higher performances even with off-the-shelf archi- +tectures. +Zhang et al. (2022) show that entire weights of specific +convolution layers can be reset to i.i.d. initializations af- +ter training without significantly hurting the accuracy. The +number of such layers decreases with increased dataset com- +plexity. +Ulyanov et al. (2018) demonstrated that randomly weighted +CNNs generate good priors for standard inverse problems +such as super-resolution, inpainting, or denoising. +Convolution Filters from Linear Combinations. +A dif- +ferent line of work explores learning filters as linear com- +binations as different (frozen) bases such as DCT (Ulicny +et al., 2022), Wavelets (Liu et al., 2019), Fourier-Bessel +(Qiu et al., 2018), or eigenimages of pretrained weights +(Tayyab & Mahalanobis, 2019). These bases can be seen as +a set of fixed filters and therefore similar to our approach. +However, most bases-approaches enforce the same amount +of filters in every layer, whereas, naturally, the amount of +filters varies per layer (as defined by the architecture). Fur- +thermore, the number of bases is finite, which limits the +amount of possible linear combinations. Contrary, there +are infinitely many random filters. This “overcompleteness” +may in fact be necessary as suggested by the LTH (Frankle +& Carbin, 2019). +Analysis of Convolution Filters. 
A long thread of research (Olah et al., 2020a;b;c; Cammarata et al., 2020; 2021; Schubert et al., 2021; Voss et al., 2021a;b; Petrov et al., 2021) extensively analyzed the features, connections, and their organization of a trained InceptionV1 (Szegedy et al., 2014) model. Among others, the authors claim that different CNNs will form similar features and circuits even when trained for different tasks. This is backed by a large-scale analysis of learned 3 × 3 convolution kernels (Gavrikov & Keuper, 2022a), which additionally reveals that CNNs generally seem to learn highly similar convolution kernel distributions, independent of training data or task. Further, the majority of kernels seem to be randomly distributed or defunct, and only a small rate seems to be performing useful transformations.

Pointwise Convolutions. Lin et al. (2014) first introduced the concept of "network in network" in which pointwise (1 × 1) convolutions are used to "enhance the model discriminability for local receptive fields". Although implemented similarly to spatial convolutions, pointwise convolutions do not aggregate the local neighborhood but instead compute linear combinations of the inputs and can be seen as a kind of fully-connected layer rather than a traditional convolution. Modern CNNs often use pointwise convolutions (e.g. He et al., 2015b; Sandler et al., 2018; Liu et al., 2022) to reduce the number of channels before computationally expensive operations such as spatial convolutions, or to approximate the computation of regular convolutions using depthwise filters (depthwise separable convolutions (Chollet, 2017)). Interestingly, spatial convolutions can also learn to mimic this behavior: Gavrikov & Keuper (2022b) reported that CNNs trained with ℓ∞-adversarial training (Madry et al., 2018) primarily learn the center weight in initial convolution layers with 3 × 3 kernels, while other weights are (close to) zero. Due to a lack of local neighborhood aggregation by these kernels, they effectively act as pointwise convolutions.

3. Preliminaries

Convolutions. We define a 2D convolution layer by a function F_conv2d(X; W) transforming an input tensor X with c_in input channels into a tensor with c_out output channels, using convolution filters with a size of k_0 × k_1. Without loss of generality, we assume square kernels with k = k_0 = k_1 in this paper. Further, we denote the learned weights by W ∈ R^{c_out × c_in × k × k}. The outputs Y_i are then defined as:

    Y_i = W_i * X = \sum_{j=1}^{c_in} W_{i,j} * X_j,    for i ∈ {1, ..., c_out}.    (1)

Note how the result of the convolution is reduced to a linear combination of inputs with a now scalar W_{i,j} for the special case of k = 1 (pointwise convolution):

    Y_i = \sum_{j=1}^{c_in} W_{i,j} * X_j = \sum_{j=1}^{c_in} W_{i,j} \cdot X_j    (2)

The PyTorch default initialization of model weights is Kaiming Uniform (He et al., 2015a). Here, every kernel weight w ∈ W is drawn i.i.d. from a uniform distribution bounded by a heuristic derived from the input fan (inputs c_in × kernel area k²). At default values, this is equivalent to:

    w ∼ U_{[-a, a]}    with    a = 1 / \sqrt{c_in k²}    (3)

Figure 2. Linear combinations of random filters are able to reconstruct learned spatial filters. (The figure depicts a learned filter written as a weighted sum of random filters w_0, ..., w_{n-1}.)
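Equation (2) is easy to make concrete: a 1 × 1 convolution is nothing but a per-pixel weighted sum over the input channels. The following minimal sketch (our own illustration, assuming PyTorch; shapes are arbitrary) checks this equivalence directly:

import torch
import torch.nn as nn

torch.manual_seed(0)
x = torch.randn(1, 8, 16, 16)              # batch, c_in, height, width

# A pointwise convolution with c_in = 8 inputs and c_out = 4 outputs.
pw = nn.Conv2d(8, 4, kernel_size=1, bias=False)
y_conv = pw(x)

# Equation (2): the same output as an explicit linear combination of channels.
coeffs = pw.weight.view(4, 8)              # c_out x c_in scalar coefficients
y_lc = torch.einsum("oc,bchw->bohw", coeffs, x)

print(torch.allclose(y_conv, y_lc, atol=1e-6))   # True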
Linear combinations.

Definition 3.1. A pointwise convolution applied over the outputs of spatial convolutions computes linear combinations of previous outputs, which is equivalent to a convolution with a linear combination of previous filters with the same coefficients.

Proof. Assume that the l-th layer is a regular convolution with k > 1, and inputs into a k = 1 pointwise convolution layer (l + 1). X is the input. Then setting Equation (1) as input for Equation (2) results in:

    Y_i^{(l+1)} = \sum_{j=1}^{c_in^{(l+1)}} W_{i,j}^{(l+1)} \cdot X_j^{(l+1)}
                = \sum_{j=1}^{c_in^{(l+1)}} W_{i,j}^{(l+1)} \cdot ( W_j^{(l)} * X^{(l)} )
                = X^{(l)} * \sum_{j=1}^{c_in^{(l+1)}} ( W_{i,j}^{(l+1)} \cdot W_j^{(l)} )    (4)

As such, any (learned) filter can be approximated by a (learned) linear combination of sufficiently many random filters (Figure 2).

4. Experiments

In the following, we conduct experiments on models with spatial convolution weights frozen to their initial random values. Therefore, spatial convolution weights are never updated and do not require gradients. For simplicity, we will refer to such models as frozen random through the remainder of the paper. Due to the large number of experiments needed for our analysis, we mostly experiment on CIFAR-10 (Krizhevsky et al., 2009) and later show that our observations in principle scale to ImageNet (Deng et al., 2009).

Training setup. We train all CIFAR models with the same hyperparameters (see Appendix C) for all experiments, as they produce reasonable (although not SOTA) results on many architectures optimized for CIFAR and ensure a fair comparison within our experiments. Hence, it should be noted that individual hyperparameter tuning would increase the individual model performance in most cases. All results are reported over at least 4 runs unless stated otherwise.

Figure 3. Validation accuracy of different models trained on CIFAR-10 with frozen random vs. learnable spatial convolutions. Models in the right half use blocks that integrate 1 × 1 convolutions after spatial convolutions and are, therefore, able to approximate learned convolution filters by linear combinations of random filters.

4.1. Baseline Experiments

We start by training common off-the-shelf architectures¹ such as ResNet-14/18/34/50/101 (He et al., 2015b), a special ResNet variant modified for CIFAR called ResNet-20 (He et al., 2015b), Wide-ResNet-50x2 (Zagoruyko & Komodakis, 2016), and MobileNet v2 (Sandler et al., 2018) on CIFAR-10. Although all models achieve an approximately similar validation accuracy when trained normally, we observe two kinds of frozen random behavior (Figure 3): ResNet-50/101, Wide-ResNet-50x2, and MobileNet v2 approximately converge to similar accuracy (1.6-1.9% difference), while the other models show heavy drops of at least 16% in accuracy. An explanation for this effect can be found in common architectural elements of the first set of models: contrary to the Basic-Blocks in other models, they all use Bottleneck-Blocks or variants thereof (Sandler et al., 2018), which complement the traditional spatial convolutions by pointwise (1 × 1) convolutions, outside the common usage in downsampling operations in residual skip-connections (He et al., 2015b).
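The filter-space identity of Equation (4) can also be verified numerically in a few lines. This sketch (ours, assuming PyTorch, with arbitrary shapes) compares a frozen 3 × 3 convolution followed by a 1 × 1 convolution against a single convolution whose filters are the corresponding linear combinations:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(1, 3, 32, 32)                    # input X^(l)
w_spatial = torch.randn(16, 3, 3, 3)             # frozen random 3x3 filters W^(l)
w_point = torch.randn(8, 16, 1, 1)               # learnable 1x1 coefficients W^(l+1)

# Path 1: spatial convolution, then pointwise convolution.
y1 = F.conv2d(F.conv2d(x, w_spatial, padding=1), w_point)

# Path 2: one convolution whose filters are linear combinations (Equation (4)).
w_combined = torch.einsum("oc,cikl->oikl", w_point.view(8, 16), w_spatial)
y2 = F.conv2d(x, w_combined, padding=1)

print(torch.allclose(y1, y2, atol=1e-4))         # True up to floating point error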
As shown in Equation (4), the linear combination computed by pointwise convolutions also applies to weights of spatial convolutions, and, therefore, these models are able to approximate learned convolutions from linear combinations of random filters.

4.2. Increasing Linear Combinations

To study the effect of linear combination capabilities on the ability of CNNs to learn with frozen random filters in more detail, we introduce LCResNets, specifically designed to allow tweaking of the linear combinations of convolution filters without affecting other layers. We build these based on the (Basic-Block) CIFAR-ResNet variant introduced by (He et al., 2015b). Compared to regular ResNets, the CIFAR-specialized architecture has a drastically lower number of parameters and is, therefore, more suitable for large-scale studies such as this one. In the architecture, we replace every spatial convolution with an LC-Block: a spatial convolution with c_in input channels and c_out output channels then becomes a spatial convolution with c_out × E filters fed into a pointwise convolution with c_out outputs (Figure 5). We denote these models by LCResNet-{D}-{W}x{E}, where {D} is the network depth, i.e. the number of spatial convolution and fully-connected layers, {W} the network width (default 16), i.e. the initial number of channels communicated between Basic-Blocks, and {E} an LC expansion factor (default 1) which increases the number of spatial filters in LC-Blocks and, therefore, the computed linear combinations, without increasing the block's number of outputs.

¹Some are slightly modified to operate on low-resolution images. We will release these architectures with the rest of the code.

Figure 4. Robust (FGSM, ℓ∞, ϵ = 1/255) validation accuracy of LCResNet-20-16x{E} on CIFAR-10 with frozen random or learnable spatial convolutions under increasing LC expansion {E}.

Figure 5. Basic-Block with LC-Blocks. We replace all convolutions with LC-Blocks, which consist of a spatial and a pointwise convolution. The expansion factor E allows increasing the number of spatial filters/linear combinations without altering the LC-Block's number of outputs. (The diagram shows two stacked LC-Blocks, each a 3×3 Conv followed by a 1×1 Conv (LC) and BN (+ ReLU), with a 1×1 convolution on the skip connection only if c_in ≠ c_out, else an identity.)
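The LC-Block described above maps naturally onto a small module. The sketch below is our own minimal rendering of the idea (BatchNorm/ReLU placement omitted for brevity), assuming PyTorch, and is not the authors' released implementation:

import torch.nn as nn

class LCBlock(nn.Module):
    """Spatial convolution with c_out * E (optionally frozen random) filters,
    followed by a learnable 1x1 convolution that linearly combines them."""

    def __init__(self, c_in, c_out, expansion=1, kernel_size=3, frozen=True):
        super().__init__()
        self.spatial = nn.Conv2d(c_in, c_out * expansion, kernel_size,
                                 padding=kernel_size // 2, bias=False)
        if frozen:
            self.spatial.weight.requires_grad = False   # never updated during training
        self.lc = nn.Conv2d(c_out * expansion, c_out, kernel_size=1, bias=False)

    def forward(self, x):
        return self.lc(self.spatial(x))

Replacing every spatial convolution of the CIFAR-ResNet Basic-Block with such a block (keeping BatchNorm and ReLU around it, as in Figure 5) yields the LCResNet-{D}-{W}x{E} family used below.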
To strengthen this hypothesis, we measure the robust +accuracy of the models via light ℓ∞-FGSM-attacks (Good- +fellow et al., 2015) with ϵ = 1/255 ) and see similar trends +in robustness (Figure 4). +Due to the accuracy gap, it seems viable to conclude that +frozen random models learn different representations and +hence different filters. In the following, we aim to quantify +this based on filter variance entropy (Gavrikov & Keuper, +2022a). The singular value decomposition-based metric +quantifies the diversity of filter kernels, by providing a mea- +surement in an interval between entirely random patterns +(as seen in just initialized weights) and a singular pattern +repeated throughout all kernels. +We apply this metric to understand the learned deviation +from the random initialization. For this experiment, instead +of analyzing the random spatial filters directly, we compute +the resulting linear combination of spatial filters. Further, +we limit ourselves to the filters in the initial convolution +layer, as it is generally well-understood and studied (Fig- +ure 6). In normally trained models we measure an expected +balanced diversity of patterns that does not significantly +fluctuate with LC expansion. Obversely, frozen random +models at default expansion produce almost random filters +due to an insufficient amount of linear combinations in the +initial layer (only 16). With an increasing expansion, frozen +random models can diversify their patterns. Yet, even at the +highest studied expansion rate, they remain more diverse, +which again may limit the risk of overfitting. +Based on the results, we conclude that 1) regularly trained +networks with extreme expansion are prone to overfitting; +2) the performance of frozen random models increases with +the number of linear combinations but they appear to over- +fit less; 3) frozen random models can outperform normal +training regimes. Often in addition to the massive savings +in trainable parameters (Table 1). +Alternatively to expansion, reducing the performance +gap can also be achieved by increasing the network width +(Figure 7). Wider networks generally reach higher accu- +racies, but also decrease efficiency due to increasing chan- +nels. Lastly, due to the compositional nature of deep neural +networks, the gap also diminishes with increasing depth +1 +128 +16 +2 +32 +4 +64 +8 +0 +0.2 +0.4 +0.6 +0.8 +1 +Spatial Convolutions +Frozen Random +Learnable +LC Expansion +Norm. Filter Variance Entropy +Figure 6. Filter variance entropy normalized by the randomness +threshold as a metric of diversity in filter patterns of the first layer. +Measured on LCResNet-20-16x{E} trained on CIFAR-10 with +frozen random or learnable spatial convolutions under increasing +LC expansion {E}. Values above 1 indicate a random distribution +of kernel patterns, while values of 0 indicate a collapse to one +specific pattern. +16 +32 +64 +128 +256 +84 +86 +88 +90 +92 +94 +Spatial Convolutions +Frozen Random +Learnable +Network Width +Validation Accuracy [%] +Figure 7. Validation accuracy of LCResNet-20-{W}x1 on +CIFAR-10 with frozen random or learnable spatial convolutions +under increasing network width {W}. +(Figure 8), yet at a slow and impractical rate compared to +expansion or width (break-even at approx. D = 260). +4.3. Reducing Network Parameters +At initialization, model weights are i.i.d. and do not show +any inherent patterns. 
4.3. Reducing Network Parameters

At initialization, model weights are i.i.d. and do not show any inherent patterns. As such, it appears intriguing to understand if a specific set of weights can be shared throughout all spatial convolution layers to decrease the total number of network parameters.

Global weight sharing. We first start with a naive approach, where we draw a random weight W_s ∼ U_{[-1,1]} to be shared. The shape of W_s is the maximum length of all spatial convolution weights: max_{l ∼ spatial conv(θ)} c_out^{(l)} c_in^{(l)} k^{(l)} k^{(l)}. Convolution weights are then (reshaped) slices of W_s, according to the required length in the respective layer. The slicing can be implemented as views of the W_s tensor and, therefore, should not consume additional memory. To counteract the vanishing/exploding gradient problem, we scale individual slices by a fixed coefficient s = 1/\sqrt{c_in k²} per layer, derived from (He et al., 2015a) (see Appendix D for details).

Figure 8. Validation accuracy of LCResNet-{D}-16x1 on CIFAR-10 with frozen random or learnable spatial convolutions under increasing network depth {D}.

Figure 9. Validation accuracy of frozen random LCResNet-20-16x{E} with weight sharing vs. non-shared training under increasing expansion {E}.

Training LCResNets with weight sharing reveals an interesting effect: the total number of parameters drastically decreases (Table 1), although models trained with weight sharing perform approximately on par with ones without sharing (Figure 9). For some expansion factors, training with weight sharing even outperforms frozen random training. At the largest evaluated expansion, weight sharing performs only 0.31% worse.

Table 1. Successive reduction of total and learnable parameters in an LCResNet-20-16x128 by applying the techniques proposed.

Method                     | Total Params [M] | Learnable Params [M] | Val. Acc. [%]
Baseline                   | 38.4             | 38.4                 | 91.39 ± 0.29
+ Frozen                   | 38.4             | 4.2                  | 91.89 ± 0.10
+ Weight Sharing           | 8.8              | 4.2                  | 91.58 ± 0.15
+ Recycled WS (1/16)       | 4.6              | 4.2                  | 91.92 ± 0.21
+ 1 × 1 Reg. (λ = 5e-5)    | 4.6              | 4.2                  | 91.57 ± 0.28
+ 1 × 1 Pruning (ρ = 0.7)  | 1.7              | 4.2                  | 91.34 ± 0.24
Reduction                  | 22.6×            | 9.1×                 | -

Figure 10. Validation accuracy of frozen random LCResNet-20-64x1 on CIFAR-10 at different levels of recycled weight sharing. Vertical lines indicate the length of all requested slices.

Recycled weight sharing. Since sharing weights between layers successfully decreases the total number of parameters without significant accuracy impact, we aim to understand whether more weights can be reused to further reduce this number. We test this by gradually reducing the length of W_s. If the length of a requested slice exceeds W_s, we stack copies of it until it becomes of sufficient length, and then take a slice from the resulting tensor. We test values that are factors of k², to reshare weights corresponding to entire filter kernels.
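The sharing and recycling scheme can be sketched as follows (our own illustration, assuming PyTorch; the name shared_bank and the chosen bank length are hypothetical). The per-layer scale follows the coefficient s = 1/\sqrt{c_in k²} from above:

import torch

torch.manual_seed(0)
bank_len = 100_000                                   # length of the shared weight W_s
shared_bank = torch.empty(bank_len).uniform_(-1, 1)  # W_s ~ U[-1, 1], never trained

def sliced_weight(c_out, c_in, k):
    """Build a (c_out, c_in, k, k) spatial weight from the shared bank,
    recycling (tiling) the bank if the request is longer than the bank."""
    n = c_out * c_in * k * k
    flat = shared_bank
    if n > flat.numel():
        reps = -(-n // flat.numel())                 # ceiling division
        flat = flat.repeat(reps)                     # stack copies of W_s
    scale = 1.0 / (c_in * k * k) ** 0.5              # s = 1 / sqrt(c_in * k^2)
    return (scale * flat[:n]).view(c_out, c_in, k, k)

w1 = sliced_weight(16, 3, 3)       # e.g. the first spatial convolution
w2 = sliced_weight(128, 128, 3)    # a larger layer recycles the same bank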
We empirically test this procedure on an LCResNet-20-64x1 (Figure 10) and observe that a 4× reduction only results in an accuracy drop of 0.17%, and 10× in only approx. 1%, indicating that indeed weights can be recycled up to a certain threshold. Beyond that, accuracy significantly decreases.

Weight sparsity. Up to this point, we showed ways to reduce the number of parameters by design choices during training. Moreover, it is possible to further reduce the amount by pruning after training. Given the random nature of spatial convolution weights in frozen random training, we do not expect pruning to be successful and directly focus on the pointwise convolution weights.

To get an estimate of the expected reduction, we apply unstructured global magnitude pruning (LeCun et al., 1989) on the 1 × 1 convolution weights without further finetuning. Although highly expanded networks already contain a large amount of near-zero 1 × 1 weights (see Appendix F for histograms), the ratio can be increased by a regularization term. We propose an ℓ1 regularization over the pointwise weights, added to the training objective L that optimizes the set of network parameters θ:

    \min_θ  L + λ \sum_{W ∼ pointwise conv(θ)} ∥W∥_1    (5)

Combining approaches. Exemplarily, we show the effect of all techniques combined on an LCResNet-20-16x128 in Table 1. We achieve the final reduction of 22.6× on the total parameters and 9.1× on the learnable parameters by consequently applying our proposed techniques. For recycled weight sharing, we shrink the shared weight to 1/16 of its original size and use λ = 5e-5 for regularization. On top of that, we prune 70% of pointwise convolution weights with one of the simplest pruning techniques, having almost no computational overhead. Trading off increased training time, we expect more sophisticated pruning methods in combination with additional fine-tuning of the model to further increase these ratios. For an ablation of λ and the impact of pruning on the accuracy see Appendix H; for an analysis of timing and memory consumption see Appendix I.

4.4. Increasing Kernel Size

Our networks use the default 3 × 3 kernel size, which was dominant for the past years. However, recently proposed CNNs often increase the kernel size, e.g. (Tan & Le, 2020; Liu et al., 2022; Trockman & Kolter, 2022), sometimes to as large as 31 × 31 (Ding et al., 2022).

To verify that linear combinations scale to larger frozen random kernels, we increase the convolution sizes in an LCResNet-20-16 to k ∈ {5, 7, 9} (with a respective increase of input padding) and measure the gap between training with learned and frozen random spatial convolutions (Figure 11). Our results show that the gap between frozen random and regular models increases with kernel size, but steadily diminishes with increasing expansion and eventually breaks even for all our tested expansions, except for k = 9, which we expect to also break even at larger expansions.

We visualize the linear combinations of 9 × 9 filters in the first convolution layers under increasing expansion in Figure 12. It becomes clearly visible how the reconstructed filters increasingly resemble the learned filters as the expansion increases. For more comparisons, see Appendix E.
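The visual convergence in Figure 12 mirrors a simple numerical fact: the best linear combination of E random kernels approximates a fixed target kernel increasingly well as E grows. A small least-squares sketch (ours, assuming PyTorch; the target filter is a random stand-in for a learned one):

import torch

torch.manual_seed(0)
k = 9
target = torch.randn(k * k, 1)                    # stand-in for a learned k x k filter

for E in (1, 8, 64, 512):
    basis = torch.randn(k * k, E)                 # E random filters, flattened as columns
    coeffs = torch.linalg.lstsq(basis, target).solution   # best linear combination
    recon = basis @ coeffs
    rel_err = torch.norm(recon - target) / torch.norm(target)
    print(E, float(rel_err))                      # relative error shrinks as E grows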
A possible explanation for the inferior performance of larger frozen random kernels can be found in the i.i.d. initialization of weights: kernel weights are initialized without consideration of the weight location in the filter. However, we observe that the variance in learned kernel weights is highly influenced by the weight location (Figure 13). The variance is approximately uniformly distributed for k ∈ {3, 5}, which allows linear combinations to reconstruct learned weights well. Yet, the variance per location increasingly deviates from a uniform distribution as the kernel size increases: at larger k, outer weights show significantly lower variance, while the highest variance is located in the center of the filter. Linear combinations do not change the variance of individual weights, and the variance remains equally distributed. Therefore, the probability of a full reconstruction of learned weights from uniformly initialized filters decreases with increasing kernel size. Assuming that the learned filters are optimal, the reconstruction error should correlate with the accuracy gap. The increasing reconstruction error can also again be quantified by measurements of the filter variance entropy (see Appendix E for measurements).

Figure 11. Gap in validation accuracy of frozen random and learnable LCResNet-20-16x{E} on CIFAR-10 with different convolution kernel sizes under increasing LC expansion {E}.

Figure 12. Visualization of the reconstructed 9×9 convolution filters after linear combination of the first convolution layer in frozen random LCResNets-20-16x{E} with increasing expansion {E}, compared to random and learned weights.

4.5. Scaling to ImageNet

In this section, we want to demonstrate that our results also scale to larger and more complex datasets such as ImageNet.

Figure 13. Variance of weights per element of convolution kernels of learned (top) and linear combinations of frozen random (bottom) convolutions. Measured in LCResNet-20-16x1 with different kernel sizes (3×3, 5×5, 7×7, 9×9). The variance was measured over all convolution kernels in a model. Kernels were normalized by the standard deviation of the entire convolution weight in their respective layer.

Instead of repeating our previous experiments with theoretical architectures, we train off-the-shelf models that already integrate pointwise convolutions, such as ResNet-50, ResNet-50d (He et al., 2019) [replaces the 7 × 7 convolution in the stem by 3 layers of 3 × 3 convolutions], ResNeXt-50-32x4d (Xie et al., 2017) [uses depthwise separable convolutions], Wide-ResNet-50x2, and MobileNet v2/v3 (Howard et al., 2019). As a sanity check, we also train a ResNet-18 that does not compute linear combinations.

We train all models as per (Wightman et al., 2021) with automatic mixed precision training (Micikevicius et al., 2018) for 300 epochs at 224² px resolution without any pre-training, and report top-1 and top-5 accuracy for both learnable and frozen random training.
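Automatic mixed precision training as used here amounts to running the forward pass under autocast and scaling the loss; a generic sketch (ours, assuming PyTorch, and not the exact recipe of Wightman et al., 2021):

import torch

scaler = torch.cuda.amp.GradScaler()

def train_step(model, x, y, optimizer, criterion):
    optimizer.zero_grad(set_to_none=True)
    with torch.cuda.amp.autocast():          # forward pass in mixed precision
        loss = criterion(model(x), y)
    scaler.scale(loss).backward()            # scale the loss to avoid fp16 underflow
    scaler.step(optimizer)
    scaler.update()
    return loss.detach()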
The results (Table 2) show larger gaps in accuracy on ImageNet than on CIFAR-10. This is not particularly surprising, as ImageNet is a more complex dataset and results in more diverse filter patterns (Gavrikov & Keuper, 2022a; Zhang et al., 2022), which in turn increases the complexity of reconstruction from random filters. Additionally, most of the analyzed models contain convolutions larger than 3 × 3 in their initial layers, increasing the reconstruction error to learned weights; e.g. we see a reduction in the gap when switching from ResNet-50 to ResNet-50d. Overall, we obtain highly non-trivial performances by simply exploiting linear combinations, with as low as a 3.21% gap in validation accuracy. And again, we observe that a Wide-ResNet shows a smaller gap than a traditional ResNet, backing our scaling theory. Our sanity check shows an expected gap of 35.34%, due to the lack of linear combinations.

Note that none of these models are as wide as the LCResNets we experimented with in previous sections. We hypothesize that at an increased expansion or width, frozen random ImageNet models would also perform on par with their regular counterparts. Naturally, the proposed parameter reduction techniques will apply to these models as well, albeit they may be less impactful.

Table 2. ImageNet top-1 (top-5 in brackets) validation accuracy of various models with regular and frozen random training. Results are reported over a single run.

Model               | LC  | Val. Acc. Frozen Rand [%] | Val. Acc. Learnable [%] | Top-1 ∆ [%]
ResNet-18           | no  | 36.54 (59.84)             | 71.88 (90.27)           | 35.34
ResNet-50           | yes | 74.45 (91.93)             | 79.50 (94.50)           | 5.05
ResNet-50d          | yes | 75.66 (92.28)             | 79.78 (94.41)           | 4.32
ResNeXt-50-32x4d    | yes | 76.60 (93.00)             | 79.81 (94.50)           | 3.21
Wide-ResNet-50x2    | yes | 76.70 (92.83)             | 80.09 (94.51)           | 3.39
MobileNet v2 S 1.00 | yes | 66.34 (86.63)             | 71.69 (90.45)           | 5.35
MobileNet v3 L 1.00 | yes | 68.41 (87.96)             | 76.60 (93.00)           | 8.19

5. Conclusion, Discussion, and Future Work

We have demonstrated that networks that compute linear combinations of random convolution weights can achieve highly non-trivial performances without ever updating convolution weights. In the extremes, these frozen random models even outperform regular models. In combination with weight sharing and pruning of pointwise weights, both the number of total and learnable parameters can be reduced, resulting in faster training and smaller model checkpoints on disk.

Also, we have observed that, in general, linear combinations can scale to larger kernels, albeit at an increasing reconstruction error against learnable weights. A relatively simple solution for this seems to be an adjustment of variance depending on the position in the filter, leading to non-i.i.d. initializations. Ultimately, this raises the question of whether there is a more suitable set of filters that works for a variety of problems. Finding such a set may indeed allow training off-the-shelf architectures on par with traditional learning without ever learning spatial convolution filters.

6. Limitations

Our proposed weight sharing only significantly reduces parameters if models rely on traditional convolutions. Yet, the number of saved parameters may not be that noticeable during training, as the majority of memory is consumed by gradients and intermediate computations. Further, recently there has been a trend (Sandler et al., 2018; Howard et al., 2019; Liu et al., 2022) towards depthwise convolutions, where the dominant amount of parameters is allocated in pointwise weights and only a minor share in spatial convolutions. In these settings, neither freezing nor weight sharing significantly decreases the total number of parameters. Also, generally, some parameter saving techniques such as pruning may only become relevant on specialized soft- or hardware.
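For completeness, the two pointwise-weight techniques referred to here and in Section 4.3, the ℓ1 penalty of Equation (5) and unstructured global magnitude pruning, can be sketched as follows (ours, assuming PyTorch; the helper names are hypothetical):

import torch
import torch.nn as nn

def pointwise_convs(model):
    return [m for m in model.modules()
            if isinstance(m, nn.Conv2d) and m.kernel_size == (1, 1)]

def pointwise_l1(model, lam=5e-5):
    """lambda * sum of l1 norms over all 1x1 convolution weights (Equation (5)).
    During training: loss = criterion(model(x), y) + pointwise_l1(model)."""
    return lam * sum(m.weight.abs().sum() for m in pointwise_convs(model))

def prune_pointwise(model, ratio=0.7):
    """Zero out the smallest-magnitude 1x1 weights globally, without finetuning."""
    convs = pointwise_convs(model)
    all_w = torch.cat([m.weight.detach().abs().flatten() for m in convs])
    threshold = all_w.sort().values[int(ratio * all_w.numel())]
    with torch.no_grad():
        for m in convs:
            m.weight.mul_((m.weight.abs() > threshold).float())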
+We have already shown that linear combinations struggle +to reconstruct filters as the kernel size increases. We often +assumed that learnable filters are optimal which is not nec- + +Rethinking 1×1 Convolutions: Can we train CNNs with Frozen Random Filters? +essarily guaranteed. For example, filters with large kernel +sizes appear to only utilize a small amount of the filter vol- +ume. Yet, we have seen that LCs of frozen random filters +appear to close the gap while intrinsically being limited to +exploiting the full volume equally. At even higher expan- +sion rates, they may even outperform traditional learnable +filters. +References +Cammarata, N., Goh, G., Carter, S., Schubert, L., Petrov, +M., and Olah, C. Curve detectors. Distill, 2020. doi: +10.23915/distill.00024.003. URL https://distill. +pub/2020/circuits/curve-detectors. +Cammarata, N., Goh, G., Carter, S., Voss, C., Schubert, +L., and Olah, C. Curve circuits. Distill, 2021. doi: +10.23915/distill.00024.006. URL https://distill. +pub/2020/circuits/curve-circuits. +Chollet, F. Xception: Deep learning with depthwise separa- +ble convolutions. In Proceedings of the IEEE Conference +on Computer Vision and Pattern Recognition (CVPR), +July 2017. +Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., and +Fei-Fei, L. Imagenet: A large-scale hierarchical image +database. In 2009 IEEE Conference on Computer Vi- +sion and Pattern Recognition, pp. 248–255, 2009. doi: +10.1109/CVPR.2009.5206848. +Ding, X., Zhang, X., Han, J., and Ding, G. Scaling up +your kernels to 31x31: Revisiting large kernel design in +cnns. In Proceedings of the IEEE/CVF Conference on +Computer Vision and Pattern Recognition (CVPR), pp. +11963–11975, June 2022. +Frankle, J. and Carbin, M. +The lottery ticket hypothe- +sis: Finding sparse, trainable neural networks. In In- +ternational Conference on Learning Representations, +2019. URL https://openreview.net/forum? +id=rJl-b3RcF7. +Frankle, J., Schwab, D. J., and Morcos, A. S. Training +batchnorm and only batchnorm: On the expressive power +of random features in CNNs. In International Conference +on Learning Representations, 2021. URL https:// +openreview.net/forum?id=vYeQQ29Tbvx. +Gavrikov, P. and Keuper, J. Cnn filter db: An empirical +investigation of trained convolutional filters. In Proceed- +ings of the IEEE/CVF Conference on Computer Vision +and Pattern Recognition (CVPR), pp. 19066–19076, June +2022a. +Gavrikov, P. and Keuper, J. Adversarial robustness through +the lens of convolutional filters. In Proceedings of the +IEEE/CVF Conference on Computer Vision and Pat- +tern Recognition (CVPR) Workshops, pp. 139–147, June +2022b. +Glorot, X. and Bengio, Y. +Understanding the diffi- +culty of training deep feedforward neural networks. + +Rethinking 1×1 Convolutions: Can we train CNNs with Frozen Random Filters? +In Teh, Y. W. and Titterington, M. (eds.), Proceed- +ings of the Thirteenth International Conference on Ar- +tificial Intelligence and Statistics, volume 9 of Pro- +ceedings of Machine Learning Research, pp. 249– +256, Chia Laguna Resort, Sardinia, Italy, 13–15 May +2010. PMLR. URL https://proceedings.mlr. +press/v9/glorot10a.html. +Goodfellow, I., Bengio, Y., and Courville, A. +Deep +Learning. +MIT Press, +2016. +http://www. +deeplearningbook.org. +Goodfellow, I. J., Shlens, J., and Szegedy, C. Explaining and +harnessing adversarial examples. In Bengio, Y. and Le- +Cun, Y. 
(eds.), 3rd International Conference on Learning +Representations, ICLR 2015, San Diego, CA, USA, May +7-9, 2015, Conference Track Proceedings, 2015. URL +http://arxiv.org/abs/1412.6572. +He, K., Zhang, X., Ren, S., and Sun, J. Delving deep into +rectifiers: Surpassing human-level performance on ima- +genet classification. In Proceedings of the IEEE Interna- +tional Conference on Computer Vision (ICCV), December +2015a. +He, K., Zhang, X., Ren, S., and Sun, J. Deep residual +learning for image recognition, 2015b. +He, T., Zhang, Z., Zhang, H., Zhang, Z., Xie, J., and Li, +M. Bag of tricks for image classification with convolu- +tional neural networks. In Proceedings of the IEEE/CVF +Conference on Computer Vision and Pattern Recognition +(CVPR), June 2019. +Hochreiter, S. Untersuchungen zu dynamischen neuronalen +netzen [in german]. Technical report, 1991. +Howard, A., Sandler, M., Chu, G., Chen, L.-C., Chen, B., +Tan, M., Wang, W., Zhu, Y., Pang, R., Vasudevan, V., +Le, Q. V., and Adam, H. Searching for mobilenetv3. In +Proceedings of the IEEE/CVF International Conference +on Computer Vision (ICCV), October 2019. +Ioffe, S. and Szegedy, C. Batch normalization: Accelerat- +ing deep network training by reducing internal covariate +shift. In Bach, F. and Blei, D. (eds.), Proceedings of the +32nd International Conference on Machine Learning, vol- +ume 37 of Proceedings of Machine Learning Research, +pp. 448–456, Lille, France, 07–09 Jul 2015. PMLR. +URL https://proceedings.mlr.press/v37/ +ioffe15.html. +Kolen, J. F. and Kremer, S. C. Gradient Flow in Recurrent +Nets: The Difficulty of Learning LongTerm Dependencies, +pp. 237–243. 2001. doi: 10.1109/9780470544037.ch14. +Krizhevsky, A., Nair, V., and Hinton, G. Cifar-10 (canadian +institute for advanced research). 2009. URL http: +//www.cs.toronto.edu/˜kriz/cifar.html. +LeCun, Y., Denker, J., and Solla, S. +Optimal brain +damage. +In Touretzky, D. (ed.), Advances in Neural +Information Processing Systems, volume 2. Morgan- +Kaufmann, 1989. +URL https://proceedings. +neurips.cc/paper/1989/file/ +6c9882bbac1c7093bd25041881277658-Paper. +pdf. +Lin, M., Chen, Q., and Yan, S. Network in network. In Ben- +gio, Y. and LeCun, Y. (eds.), 2nd International Confer- +ence on Learning Representations, ICLR 2014, Banff, AB, +Canada, April 14-16, 2014, Conference Track Proceed- +ings, 2014. URL http://arxiv.org/abs/1312. +4400. +Liu, P., Zhang, H., Lian, W., and Zuo, W. +Multi-level +wavelet convolutional neural networks. IEEE Access, +7:74973–74985, 2019. +doi: 10.1109/ACCESS.2019. +2921451. +Liu, Z., Mao, H., Wu, C.-Y., Feichtenhofer, C., Darrell, T., +and Xie, S. A convnet for the 2020s. In Proceedings of the +IEEE/CVF Conference on Computer Vision and Pattern +Recognition (CVPR), pp. 11976–11986, June 2022. +Loshchilov, I. and Hutter, F. SGDR: Stochastic gradient +descent with warm restarts. In International Conference +on Learning Representations, 2017. URL https:// +openreview.net/forum?id=Skq89Scxx. +Madry, A., Makelov, A., Schmidt, L., Tsipras, D., and +Vladu, A. +Towards deep learning models resistant +to adversarial attacks. +In International Conference +on Learning Representations, 2018. URL https:// +openreview.net/forum?id=rJzIBfZAb. +Micikevicius, P., Narang, S., Alben, J., Diamos, G., Elsen, +E., Garcia, D., Ginsburg, B., Houston, M., Kuchaiev, O., +Venkatesh, G., and Wu, H. Mixed precision training. In +International Conference on Learning Representations, +2018. URL https://openreview.net/forum? +id=r1gs9JgRZ. 
+Olah, C., Cammarata, N., Schubert, L., Goh, G., Petrov, +M., and Carter, S. +Zoom in: An introduction to cir- +cuits. Distill, 5, 2020a. doi: 10.23915/distill.00024.001. +URL https://distill.pub/2020/circuits/ +zoom-in. +Olah, C., Cammarata, N., Schubert, L., Goh, G., Petrov, +M., and Carter, S. An overview of early vision in incep- +tionv1. Distill, 2020b. doi: 10.23915/distill.00024.002. +URL https://distill.pub/2020/circuits/ +early-vision. +Olah, C., Cammarata, N., Voss, C., Schubert, L., and +Goh, G. Naturally occurring equivariance in neural net- +works. Distill, 2020c. doi: 10.23915/distill.00024.004. + +Rethinking 1×1 Convolutions: Can we train CNNs with Frozen Random Filters? +URL https://distill.pub/2020/circuits/ +equivariance. +Petrov, M., Voss, C., Schubert, L., Cammarata, N., Goh, +G., and Olah, C. Weight banding. Distill, 2021. doi: +10.23915/distill.00024.009. URL https://distill. +pub/2020/circuits/weight-banding. +Qiu, Q., Cheng, X., Calderbank, R., and Sapiro, G. DCFNet: +Deep neural network with decomposed convolutional +filters. International Conference on Machine Learning, +2018. +Rahimi, A. and Recht, B. Random features for large-scale +kernel machines. In Platt, J., Koller, D., Singer, Y., and +Roweis, S. (eds.), Advances in Neural Information Pro- +cessing +Systems, +volume +20. +Curran +Associates, +Inc., +2007. +URL +https://proceedings. +neurips.cc/paper/2007/file/ +013a006f03dbc5392effeb8f18fda755-Paper. +pdf. +Ramanujan, V., Wortsman, M., Kembhavi, A., Farhadi, A., +and Rastegari, M. What’s hidden in a randomly weighted +neural network? 2020 IEEE/CVF Conference on Com- +puter Vision and Pattern Recognition (CVPR), pp. 11890– +11899, 2019. +Rudi, A. and Rosasco, L. +Generalization properties of +learning with random features. In Guyon, I., Luxburg, +U. V., Bengio, S., Wallach, H., Fergus, R., Vish- +wanathan, S., and Garnett, R. (eds.), Advances in Neural +Information Processing Systems, volume 30. Curran As- +sociates, Inc., 2017. URL https://proceedings. +neurips.cc/paper/2017/file/ +61b1fb3f59e28c67f3925f3c79be81a1-Paper. +pdf. +Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., and +Chen, L.-C. Mobilenetv2: Inverted residuals and linear +bottlenecks. In Proceedings of the IEEE Conference on +Computer Vision and Pattern Recognition (CVPR), June +2018. +Schubert, L., Voss, C., Cammarata, N., Goh, G., and Olah, +C. High-low frequency detectors. Distill, 2021. doi: +10.23915/distill.00024.005. URL https://distill. +pub/2020/circuits/frequency-edges. +Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., +Anguelov, D., Erhan, D., Vanhoucke, V., and Rabinovich, +A. Going deeper with convolutions, 2014. +Tan, M. and Le, Q. V. Efficientnet: Rethinking model +scaling for convolutional neural networks, 2020. +Tayyab, M. and Mahalanobis, A. Basisconv: A method for +compressed representation and learning in cnns. CoRR, +abs/1906.04509, 2019. +Trockman, A. and Kolter, J. Z. +Patches are all you +need?, 2022. +URL https://arxiv.org/abs/ +2201.09792. +Ulicny, M., Krylov, V. A., and Dahyot, R. Harmonic convo- +lutional networks based on discrete cosine transform. Pat- +tern Recognition, 129:108707, 2022. ISSN 0031-3203. +Ulyanov, D., Vedaldi, A., and Lempitsky, V. Deep image +prior. In Proceedings of the IEEE Conference on Com- +puter Vision and Pattern Recognition (CVPR), June 2018. +Voss, C., Cammarata, N., Goh, G., Petrov, M., Schubert, +L., Egan, B., Lim, S. K., and Olah, C. +Visualizing +weights. Distill, 2021a. doi: 10.23915/distill.00024.007. 
URL https://distill.pub/2020/circuits/visualizing-weights.
Voss, C., Goh, G., Cammarata, N., Petrov, M., Schubert, L., and Olah, C. Branch specialization. Distill, 2021b. doi: 10.23915/distill.00024.008. URL https://distill.pub/2020/circuits/branch-specialization.
Wightman, R. Pytorch image models. https://github.com/rwightman/pytorch-image-models, 2019.
Wightman, R., Touvron, H., and Jegou, H. Resnet strikes back: An improved training procedure in timm. In NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future, 2021. URL https://openreview.net/forum?id=NG6MJnVl6M5.
Xie, S., Girshick, R., Dollar, P., Tu, Z., and He, K. Aggregated residual transformations for deep neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), July 2017.
Zagoruyko, S. and Komodakis, N. Wide residual networks. In Richard C. Wilson, E. R. H. and Smith, W. A. P. (eds.), Proceedings of the British Machine Vision Conference (BMVC), pp. 87.1–87.12. BMVA Press, September 2016. ISBN 1-901725-59-6. doi: 10.5244/C.30.87. URL https://dx.doi.org/10.5244/C.30.87.
Zhang, C., Bengio, S., and Singer, Y. Are all layers created equal? Journal of Machine Learning Research, 23(67):1–28, 2022. URL http://jmlr.org/papers/v23/20-069.html.
Zhou, H., Lan, J., Liu, R., and Yosinski, J. Deconstructing lottery tickets: Zeros, signs, and the supermask. In Wallach, H., Larochelle, H., Beygelzimer, A., d'Alché-Buc, F., Fox, E., and Garnett, R. (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. URL https://proceedings.neurips.cc/paper/2019/file/1113d7a76ffceca1bb350bfe145467c6-Paper.pdf.

A. Potential Negative Societal Impacts
We do not believe that our analysis causes any negative societal impacts. As with most publications in this field, our experiments consumed a lot of energy and caused the emission of CO2. However, training with frozen filters decreases training time, and we therefore hope to inspire future researchers to consider this training regime to reduce emissions during training.

B. Computational Resources
The training was executed on internal clusters with NVIDIA A100-SXM4-40GB GPUs for a total of approximately 58 GPU days.

C. CIFAR Training Details
We train all CIFAR models for 75 epochs. We use an SGD optimizer (with Nesterov momentum of 0.9) with an initial learning rate of 1e-2 following a cosine annealing schedule (Loshchilov & Hutter, 2017), a weight decay of 1e-2, a batch size of 256, and a Categorical Cross-Entropy loss with label smoothing (Goodfellow et al., 2016) of 1e-1.
For training, images are zero-padded by 4 px along each dimension, randomly flipped horizontally, and then randomly cropped to 32×32 px. Test images are not modified. In both cases, the data is normalized by the channel mean and standard deviation.

D. Derivation of the Layer Scale Coefficient
We use the default PyTorch initialization of convolution layers: weights are drawn from a uniform distribution and scaled according to (He et al., 2015a). PyTorch uses a default gain of √(2 / (1 + α²)) with α = √5, which results in gain = √(1/3). Further, the channel input fan is used for normalization.
The standard deviation of weights drawn from a normal distribution is given by:

σ_he = gain / √fan = gain / √(c_in · k²)    (6)

The standard deviation of a symmetric uniform distribution U[−a, a] is given by:

σ = a / √3    (7)

To retain the standard deviation, we therefore compute the scaling coefficient as follows:

s = √3 · σ_he = √3 · gain / √(c_in · k²) = √3 · √(1/3) / √(c_in · k²) = 1 / √(c_in · k²)    (8)

E. First Layer Convolution Filters after Linear Combinations
Figure 14 shows the reconstructed filters (i.e., the convolution filters obtained by the linear combination in LC-Blocks) in the first convolution layer for frozen random and learnable filters at different rates of expansion and for different kernel sizes. The filters of learnable LCResNets remain fairly similar independent of expansion, while the frozen filters become less random with increasing expansion. A well-traceable filter is the green color blob, which evolves from noise to a square blob and eventually to a Gaussian-like filter. Also visible is that larger filters concentrate more of their weight in the center of the filter.
Figure 15 shows the filter variance entropy (FVE) for the same reconstructed filters. Note that, contrary to Section 4.2, we do not normalize the FVE by the randomness threshold, as it was only derived for 3×3 convolutions by the original authors. Using the non-normalized values, however, allows a comparison independent of kernel size. Once again, we can see that the FVE remains constant throughout different expansion rates and slightly decreases with increasing kernel size. For all kernel sizes, we see that frozen random models decrease in FVE at increased expansion. However, the gap between learnable and frozen random weights significantly increases with increasing kernel size.

[Figure 14: grids of effective first-layer filters for kernel sizes (a) 3×3, (b) 5×5, (c) 7×7, and (d) 9×9; frozen random vs. learnable spatial convolutions at LC expansions 1, 2, 4, ..., 128.]
Figure 14. Visualization of the effective convolution filters after linear combination of the first convolution layer in LCResNets-20-16x{E} with increasing expansion {E} and frozen or learnable spatial convolutions.

[Figure 15: filter variance entropy vs. LC expansion (1–128) for frozen random and learnable spatial convolutions; panels (a) 3×3, (b) 5×5, (c) 7×7, and (d) 9×9.]
Figure 15. Variance entropy (not normalized for comparability) as a measure of diversity in filter patterns of the first layer in LCResNet-20-16x{E} on CIFAR-10 with frozen random or learnable spatial convolutions under increasing LC expansion {E} of the filters in spatial convolutions and different kernel sizes.
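The reconstructed filters in Figure 14 are nothing more than linear combinations of a frozen filter bank. A minimal NumPy sketch of this reconstruction (the sizes and variable names are illustrative and not taken from the paper; the coefficients stand in for the learned 1×1 weights):

import numpy as np

rng = np.random.default_rng(0)

# Illustrative sizes: a bank of M frozen random k x k filters and one learned
# coefficient per filter (in an LC-Block, these coefficients come from the 1x1 conv).
M, k = 16, 3
frozen_bank = rng.standard_normal((M, k, k))  # spatial filters, never updated
coeffs = rng.standard_normal(M)               # learned linear-combination weights

# The effective ("reconstructed") spatial filter is the weighted sum of the bank.
effective_filter = np.tensordot(coeffs, frozen_bank, axes=1)  # shape (k, k)
print(effective_filter.shape)

Figure 14 plots exactly such effective first-layer filters for every expansion rate and kernel size.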
F. Weights of 1×1 Convolutions
Figure 16 shows the distributions of pointwise weights per layer in LCResNet-20-16x{E} on CIFAR-10 with frozen or learnable spatial convolutions under increasing LC expansion {E}. As the expansion increases, the weights become sparser for both learnable and frozen random models.

[Figure 16: per-layer histograms of the pointwise weights (Layers 1–19) at expansions x1–x128.]
Figure 16. Distributions of pointwise weights per layer in LCResNet-20-16x{E} on CIFAR-10 with frozen random or learnable spatial convolutions under increasing LC expansion {E}. Each cell in a column shares the same x-axis for comparison.

G. Comparison to other ImageNet models
In Figure 17, we compare the top-1 validation accuracy and the number of learnable parameters of our trained ImageNet models (Section 4.5) against the reported results of other (non-pre-trained) ImageNet models from (Wightman, 2019). Note that the models with learnable filters do not train to SOTA performance, presumably due to sub-optimal hyperparameters. The measurements for frozen random models are obtained before weight sharing or pruning, i.e., we expect frozen random models to further reduce the number of parameters by applying our proposed techniques. Some architectures such as MobileNet or ResNeXt show barely any savings in learnable parameters because of depthwise separable convolutions, where the majority of weights is allocated in the pointwise convolutions.

[Figure 17: top-1 validation accuracy [%] vs. number of learnable parameters (approx. 1M–100M) for frozen random and learnable spatial convolutions.]
Figure 17. Comparison of our results to other non-pre-trained ImageNet models as reported in (Wightman, 2019).

H. Regularization Ablation
Here, we report the results of Section 4.3 in more detail. Figure 18 shows the final validation accuracy for a number of tested regularization values. The accuracy stays fairly constant for values up to λ = 5e−5 and then starts to decrease. Figure 19 shows the final validation accuracy under different 1×1 convolution weight pruning ratios for regularized (λ = 5e−5) and unregularized (λ = 0) models. Although regularized models perform slightly worse, they significantly outperform unregularized models at high pruning ratios.

[Figure 18: validation accuracy [%] vs. regularization parameter λ (1e−8 to 1e−1).]
Figure 18. Effect of different regularization λ on the validation accuracy of an LCResNet-20-16x128 with recycled weight sharing.

[Figure 19: validation accuracy [%] vs. LC weight pruning ratio [%], with regularization (λ = 5e−5) and without.]
Figure 19.
Validation accuracy after LC weight pruning of an LCResNet-20-16x128 with recycled weight sharing, trained with regularization (λ = 5e−5) and without. The shaded area indicates the absolute deviation from the mean.

I. Timing and Memory
Table 3 shows the timing and memory consumption of an LCResNet-20-16x128 when consecutively applying our proposed techniques (analogous to Section 4.3). We measure the time for a forward pass and for a combination of forward and backward passes. Further, we individually measure the GPU memory that the model itself, a forward pass, and a combination of forward and backward passes consume. Lastly, we measure the size of model checkpoints (including model parameters). Timings are obtained from 7×1000 runs on an NVIDIA A100-SXM4-40GB GPU for a batch of 64 samples at 32×32 px. Memory during forward/backward passes is also evaluated on the same GPU. The checkpoint size is measured by the allocated memory on disk of a PyTorch-serialized state dict.

Table 3. Timing and memory improvements of our proposed techniques evaluated on an LCResNet-20-16x128.

Configuration              | Forward Time [ms] | Fwd.+Bkwd. Time [ms] | Model Mem. [MB] | Forward Peak Mem. [MB] | Fwd.+Bkwd. Peak Mem. [MB] | Checkpoint Mem. [MB]
Baseline                   | 35.7 ± 0.145      | 124 ± 1.490          | 1002            | 8352                   | 8428                      | 146.66
+ Freezing                 | 35.7 ± 0.180      | 82.3 ± 0.092         | 1002            | 8352                   | 8428                      | 146.66
+ Weight Sharing*          | 35.9 ± 0.165      | 82.6 ± 0.131         | 888             | 8408                   | 8484                      | 33.84
+ Recycled WS* (1/16)      | 36.2 ± 0.170      | 83 ± 0.065           | 870             | 8410                   | 8486                      | 17.74
+ 1×1 Pruning** (ρ = 0.7)  | 36.1 ± 0.168      | 82.8 ± 0.008         | 870             | 8410                   | 8486                      | 17.74

* Why is there no improvement in memory consumption/timing after weight sharing? We do not expect weight sharing to improve timing; in fact, due to the sharing and scaling we expect it to even slightly increase timing. We see a reduction in memory for the model itself. During forward/backward passes we measure slightly increased memory consumption. This is due to the implementation: although we implemented the sharing by slicing, which does not allocate additional memory, we suspect that PyTorch creates copies of the views for the computation graph, which results in the allocation of additional memory and redundancy. We expect an optimized implementation to show noticeable improvements.
** Why is there no improvement in memory consumption/timing after pruning? Seeing an improvement through pruning requires specialized hardware or software.

diff --git a/r9FIT4oBgHgl3EQfySu_/content/tmp_files/load_file.txt b/r9FIT4oBgHgl3EQfySu_/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..8e8b012ad91536bc6b0883dac79068935daf4084 --- /dev/null +++ b/r9FIT4oBgHgl3EQfySu_/content/tmp_files/load_file.txt @@ -0,0 +1,1208 @@
+filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf,len=1207
+page_content='Rethinking 1×1 Convolutions: Can we train CNNs with Frozen Random Filters?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'}
+page_content=' Paul Gavrikov 1 Janis Keuper 1 2 Abstract Modern CNNs are learning the weights of vast numbers of convolutional operators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'}
+page_content=' In this paper, we raise the fundamental question if this is actu- ally necessary.'
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content=' We show that even in the extreme case of only randomly initializing and never up- dating spatial filters, certain CNN architectures can be trained to surpass the accuracy of standard training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content=' By reinterpreting the notion of pointwise (1 × 1) convolutions as an operator to learn lin- ear combinations (LC) of frozen (random) spatial filters, we are able to analyze these effects and pro- pose a generic LC convolution block that allows tuning of the linear combination rate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content=' Empirically, we show that this approach not only allows us to reach high test accuracies on CIFAR and Ima- geNet but also has favorable properties regarding model robustness, generalization, sparsity, and the total number of necessary weights.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content=' Additionally, we propose a novel weight sharing mechanism, which allows sharing of a single weight tensor be- tween all spatial convolution layers to massively reduce the number of weights.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content=' Code: https://after-accept.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content='com.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content=' Introduction Convolutional Neural Networks (CNN) are building the backbone of state-of-the-art neural architectures in a wide range of learning applications on n-dimensional array data, such as standard computer vision problems like 2D image classification, semantic segmentation, or scene understand- ing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content=' In order to solve these tasks, modern CNN architectures are learning the entries (=weights) of millions of convolu- tional filter kernels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content=' This process is not only very compute and data intensive, but apparently also mostly redundant as CNNs are learning kernels that are bound to the same distribution, even when training different architectures on Equal contribution 1IMLA, Offenburg University, Offenburg, Germany 2Fraunhofer ITWM, Kaiserslautern, Germany.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/r9FIT4oBgHgl3EQfySu_/content/2301.11360v1.pdf'} +page_content=' Corre- spondence to: Paul Gavrikov 0, θ(0) = 0 and θ(x) = −1 if x < 0. Then the data are classified with respect to +the side of the hyperplane given by the equation xw = 0, by y = θ(xw) function. The position of +the plane (weights w) are taken such that to minimize the distance between the features fi and +corresponding outputs yi = θ(WXi). 
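A minimal NumPy sketch of the decision rule y = θ(xw) and of the classical Rosenblatt weight update (the update rule below is the standard textbook one rather than a quotation from this text, and for simplicity the step function maps 0 to −1 instead of 0):

import numpy as np

def step(z):
    # Threshold function: +1 on one side of the hyperplane, -1 on the other.
    return np.where(z > 0, 1, -1)

def train_perceptron(X, y, epochs=20, lr=0.1):
    # X: (n_samples, n_features), with a bias column of ones already appended.
    w = np.zeros(X.shape[1])
    for _ in range(epochs):
        for xi, yi in zip(X, y):
            prediction = step(xi @ w)
            # Classical perceptron rule: move the hyperplane only on mistakes.
            w += lr * (yi - prediction) * xi
    return w

# A linearly separable toy set: the class is the sign of the first feature.
X = np.array([[2.0, 1.0, 1.0], [1.5, -0.5, 1.0], [-1.0, 0.5, 1.0], [-2.0, -1.0, 1.0]])
y = np.array([1, 1, -1, -1])
w = train_perceptron(X, y)
print(step(X @ w))  # reproduces y for separable data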
The implementation of the perceptron can be found in every +standard machine learning books, e.g., [12]. +In the very core of the perceptron idea is that the effective classification is possible if the data +are linearly separable, i.e., separable by a hyperplane. This idea was proved in [11] and is related +to the XOR problem. Since the XOR gate can be recognized as a function that takes as an input +two bits and returns the one bit, the input data for the function can be treated as input data for +perceptron, and the features can be output of XOR function. It is obvious that these data are not +linearly separable, and therefore, there is no possibility that perceptrons separate features 0 from +1 be a hyperplane (in fact, a line on the plane). The ideas in the book were influential enough to +initiate, so called, (first) AI winter, the period around 1980s where the perceptron idea was put on +hold in favor of other AI architectures. +2.3 +Multilayer neural networks and backpropagation algo- +rithm +The problem of not linearly separated data was proven to be solved by showing that the connection +of a few perceptrons in a network called neural network, and learning by back-propagating the error +from the output through the network updating the weights is a solution of XOR problem. These +ideas were described in [15] and started a renovation of the interests in artificial neurons. However, +due to insufficient computing power needed for backpropagation algorithm the practical works were +stalled up to 2010s and this period is called second AI winter. +The appearance of connected artificial neurons gave rise to the new paradigm of computing +called connectionism that computing can be included in the topology (graph of connections between +neurons in a ANN), see [1]. +2.4 +Current state of development +Since the 2010s the increased interest in deep learning, i.e., use of neural networks for practical +computational tasks is reviving. This situation is due to an increasing number of examples where + +2.5. SUMMARY +9 +neural networks can handle data achieving better performance than classical algorithms. +This +progress is also induced by the high interests in deep learning from the biggest IT companies. +The most common architectures used in applications are multilayer neural networks, where +neurons are grouped in layers and each layer’s output is an input to another layer. +The simplification of construction of multilayer networks was provided by the appearance of two +powerful OpenSource libraries: +• TensorFlow (Google Brain Team [5]) - donated by Google; For common use it is accessed by +Keras frontend [2]. +• PyTorch (Meta AI [10]) - donated by Adam Paszke, Sam Gross, Soumith Chintala, and +Gregory Chanan. +These two libraries have enormous infrastructure that was built around them and are considered +as an industrial standard of deep learning. +As for the theoretical side of description of the neural networks, the insight in the idea of work +of NN is expressed in, so called, Universal Approximation Theorems. The first was proved by G. +Cybenko in 1989 [3] for sigmoidal activation function. It was soon realized that the approximation +properties of neural networks rest in multilayer (feedforward) architecture [7], [6]. 
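The XOR obstruction discussed above can be checked directly: no single threshold unit reproduces XOR, but two layers of the very same units do. A small NumPy check with hand-picked, purely illustrative weights:

import numpy as np

def step(z):
    return (z > 0).astype(int)  # fires (1) above the threshold, 0 otherwise

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
xor = np.array([0, 1, 1, 0])

# Hidden layer: an OR unit and an AND unit, both simple threshold neurons.
W1 = np.array([[1.0, 1.0], [1.0, 1.0]])
b1 = np.array([-0.5, -1.5])   # OR fires for x1 + x2 > 0.5, AND for x1 + x2 > 1.5
h = step(X @ W1 + b1)

# Output unit: OR AND (NOT AND), which is XOR.
w2 = np.array([1.0, -1.0])
y = step(h @ w2 - 0.5)

print(np.array_equal(y, xor))  # True: two layers of threshold units compute XOR

Learning such weights automatically, instead of choosing them by hand, is precisely what the backpropagation algorithm described above provides.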
Roughly stated, +Universal Approximation Theorem says that for a specific topology (e.g., for arbitrary width and +bounded depth) the output functions of feed-forward neural networks are dense in the space of +continuous functions on a compact space and with supremum norm. Recent research in this direction +focuses on estimating the optimal width and depth of layers to obtain best approximation properties, +see e.g., [13] for further references. +There are many unresolved issues related to the work of ANN that are related to the issue of +which functions can be approximated by ANN or the properties of learning algorithms. They are +summarized in the review article [4]. As it was pointed out, at the current state of understanding +there are still many unknowns however the big picture starts to emerge. +2.5 +Summary +Currently, the deep learning progress is motivated by numerous applications starting from com- +puter vision, natural language processing, self-driving cars, and ending to generation of multimedia +(pictures sounds), or design pharmaceutics. The development is pushed by a big industry that +has access to great computing powers needed to run learning algorithms. However, the theoretical +description, improvements of algorithms, or construction of nonstandard (e.g., not multilayers) ar- +chitecture is still possible within the limiting computing resources. As of 2022 it is still an active +and promising area of research. +Acknowledgements +This article has been supported by the Polish National Agency for Strategic Partnership under +Grant No. BPI/PST/2021/1/00031/U/00001 + +10 +BIBLIOGRAPHY +Bibliography +[1] Buckner, +Cameron, +and +James +Garson. +Connectionism +(Stanford +Encyclo- +pedia +of +Philosophy). +Stanford +Encyclopedia +of +Philosophy, +18 +May +1997, +https://plato.stanford.edu/entries/connectionism/. Accessed 22 September 2022 +[2] Chollet, F. Keras library. Keras: the Python deep learning API, https://keras.io/. Accessed +22 September 2022 +[3] Cybenko, G. Approximation by superpositions of a sigmoidal function, Math. Control Sig- +nal Systems, vol. 2, no. 1, 1989, pp. 303–314. https://doi.org/10.1007/BF02551274, +https://doi.org/10.1007/BF02551274 +[4] Weinan, E., et al. Towards a Mathematical Understanding of Neural Network-Based Ma- +chine Learning: what we know and what we don’t, unpublished, vol. 1, no. 1, 2020, p. 56. +https://arxiv.org/abs/2009.10713 +[5] Google +Brain +Team. +TensorFlow +library. +TensorFlow.org, +2015, +https://www.tensorflow.org/. Accessed 22 September 2022 +[6] Hornik, +K. Multilayer feedforward networks are universal approximators, +Neural Net- +works, vol. 4, no. 2, 1991, pp. 251-257. https://doi.org/10.1016/0893-6080(91)90009-T, +https://doi.org/10.1016/0893-6080(91)90009-T. +[7] Hornik, K., et al. Multilayer feedforward networks are universal approximators, Neural Net- +works, vol. 2, no. 5, 1989, pp. 359-366. https://doi.org/10.1016/0893-6080(89)90020-8, +https://doi.org/10.1016/0893-6080(89)90020-8. +[8] Izhikevich, Eugene M. Dynamical Systems in Neuroscience: The Geometry of Excitability and +Bursting, Penguin Random House LLC, 2010. +[9] McCulloch, W.S., and W. A Pitts. A logical calculus of the ideas immanent in ner- +vous activity, Bulletin of Mathematical Biophysics, vol. 5, no. 1, 1943, pp. 115–133, +https://doi.org/10.1007/BF02478259. +[10] Meta AI. PyTorch library. PyTorch.org, https://pytorch.org/. Accessed 22 September 2022. +[11] Minsky, M., and S.A. Papert. 
Perceptrons: and introduction to computational geometry, MIT +Press, 1988. +[12] Mirjalili, Vahid, and Sebastian Raschka. Python Machine Learning: Machine Learning and +Deep Learning with Python, Scikit-learn, and TensorFlow 2, Packt Publishing, 2019. +[13] Park, Sejun, et al. Minimum Width for Universal Approximation, vol. 1, no. 1, 2020, p. 29. +https://arxiv.org/abs/2006.08859, https://arxiv.org/abs/2006.08859. +[14] Rosenblatt, Frank. The Perceptron – a perceiving and recognizing automaton, Report 85-460-1. +Cornell Aeronautical Laboratory, 1957. +[15] Rumelhart, D., et al. Learning representations by back-propagating errors, Nature, vol. 323, +no. 1, 1986, pp. 533–536; https://doi.org/10.1038/323533a0. +[16] Trappenberg, Thomas. Fundamentals of Computational Neuroscience, OUP Oxford, 2010. + +Chapter 3 +Classical architecture of artificial +neural networks +A. Niemczynowicz, R.A. Kycia, M. Jaworski +3.1 +Introduction +Artificial Neural Networks (ANN) and Deep Learning discipline are currently the one of the most +active fields of research in computer science. This activity is also inspired by a large demand of IT +business for new solutions and architectures that are suitable for solving new problems or solving +more efficiently problems that are currently solvable by classical algorithms. +Apart from various architectures, there is an issue of how to encode data to make them acceptable +as an input to ANN. +The chapter is organized as follows: in the next section we acknowledge standard input data for +ANN. Then we review typical structures of multilayers ANN, which are currently the most used +architectures. Finally we make a list of nonstandard architectures. +3.2 +Encoding of data +There are various types of data in the world. Many of them have standard ways of encoding to the +form that is suitable for ANN processing. We provide an example, and by no means extinguishable +list of data types: +• Multidimensional numerical data that are transformable to vector or matrix forms. They are +described in various Machine Learning books, e.g., [11] +• Images - transformable to matrix representation. See, e.g., [16] +• Text data - transformable to vectors of words, e.g., BagOfWords, TFIDF vectors, transformers +based on neural networks; see, e.g., [9] +• Graph data - represented as, e.g., incidence matrix. See, e.g., [3] +11 + +12 +CHAPTER 3. CLASSICAL ARCHITECTURE OF ARTIFICIAL NEURAL NETWORKS +3.3 +Multilayer ANN +The typical architecture used in industry is a (multilayer) feedforward architecture that consists +of layers of neurons where output of one layer is the input of another one. The acceleration in +this direction was heavily induced by the appearance of two Open Source libraries that allow the +construction of complex ANN architectures. These libraries are: +• TensorFlow ([6]) - donated by Google company. +• PyTorch ([10]) - donated by Adam Paszke, Sam Gross, Soumith Chintala, and Gregory +Chanan. +The general nomenclature in such architectures is as follows: +• Input layer - layer that gets the input data. +• Output layer - layer that returns the result of processing. +• Hidden layers - all layers between Input and Output layers. +Within these frameworks are possible various multilayer architectures and processing capabilities +that will be outlined below. +Fully connected multilayer ANN - this is a network where all neurons in a layer are connected +with all neurons in neighborhood layers. 
Advantage of this architecture is that all neurons ‘see’ the +whole output of the preceding layer. The main disadvantage of this simple architecture is that the +number of connections in ANN grows enormously with the increase of the number of neurons. +Convolutional ANN (CNN) - the convolution is a mathematical operation of connecting neighbor +input data (words when processing text, pixels when processing images) to feed neurons with more +complete yet local information. This makes sense when the data can be treated as elements of +topological space where there is some notion of closure that represents some real objects, e.g., a +group of neighbor pixels can represent a dog or bird; the context of the sentence is represented by a +sentence of words usually in close proximity. The convolution in general is a tool for grouping ‘close’ +data together at the input, and moreover to provide some notion of group invariance. In typical +applications the convolution is realized by the kernels that are discrete versions of translational- +invariant functions. +However, the general idea is related to equivariance with respect to group +action [4] et al. In typical architecture of CNN the first few layers are convolutional layers that +combine data and as a result reduce dimensionality of the data. +Recurrent Neural Networks (RNN) [13] - these networks can be described as a discrete dynamical +system with feedback. The network is feeded by sequence of data and the output (of hidden layers) +form the previous step. This kind of “recursive processing” allows the network to see correlations +between data from different steps. Therefore such networks are ideal for text processing or time +series predictions. The big drawback of this architecture is the complicated process of learning. +Since the learning process is iterative the gradient used in backward propagation algorithm is +computed many times and this can makes it extremely small or to blow up due to numerical +manipulations. This is the so-called vanishing and exploding gradient problem. These are typical +problems with gradient-based learning algorithms when the number of layers increases. Hopfield +neural networks are a special kind of RNN. +Long Short-Term Memmory ANN (LSTM) [7] - this is the network where each neuron has its own +software memory unit. The processing can be represented as a sequence of steps and the input form + +3.4. OTHER ARCHITECTURES +13 +the previous steps is used to modify output values by means of the use of the memory. LSTM can be +used in processing data where connection between different portions of data is important. Encoder- +Decoder architecture [15]- this rather more abstract architecture consists of two neural networks +one for coding data and the second for encoding. The output is the output from the decoder. This +neural network is designed for coding of sequences, e.g., translating from one language to the other +one. Wehn processing large volumes of data (e.g. text) the decoder can lose the main purpose of +processing, and therefore the attention mechanism was invented [2]. +Generative Adversarial Network (GAN) [5] - is also an architecture consisting of G (the genera- +tive model) and D (the discriminative model). They are learned in tandem where D estimates the +probability that the output comes from the training data rather than from G. +This ends our non-inclusive overview of typical architectures used in typical industrial applica- +tions. 
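As a concrete counterpart to these descriptions, a minimal PyTorch sketch of a fully connected feed-forward network and of a small convolutional variant (the layer sizes are illustrative and not prescribed by the text):

import torch
from torch import nn

# Fully connected multilayer ANN: input layer -> hidden layers -> output layer.
mlp = nn.Sequential(
    nn.Linear(784, 128),  # input layer, e.g. a flattened 28x28 image
    nn.ReLU(),
    nn.Linear(128, 64),   # hidden layer
    nn.ReLU(),
    nn.Linear(64, 10),    # output layer, e.g. 10 classes
)

# Convolutional ANN: the first layers combine neighboring pixels via kernels.
cnn = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Flatten(),
    nn.Linear(16 * 16 * 16, 10),
)

x_vec = torch.randn(8, 784)        # a batch of 8 flattened inputs
x_img = torch.randn(8, 3, 32, 32)  # a batch of 8 RGB images
print(mlp(x_vec).shape, cnn(x_img).shape)  # both: torch.Size([8, 10])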
In the next section we present some other architectures that are used on smaller scale or in +the research on ANN. +3.4 +Other architectures +We can distinguish: +• Hopfield neural networks [8], [12] - they are modeled on a physical system of spins on a +lattice. Due to these similarities statistical physics methods can be widely applied for this +architecture. +• Boltzmann machines [14] - is another spin-based approach to neural networks. +• ’Algebraic Neural Networks’ - under this title we collected the typical multilayer architec- +tures, where computation is done using real numbers instead of real numbers, e.g., complex +numbers, various Clifford algebras, and Hypercomplex algebras. This is currently a vast field +of theoretical and practical research. +An introduction to the research in this direction is +presented in [1]. +3.5 +Summary +Due to many and still growing number of applications, ANN is an active and extremely promising +research area. Therefore each summary is burdened with incompleteness and risk of fast outdating. +In this chapter, the general overview of current architectures of ANN was presented with short +characteristics. +Acknowledgement +This article has been supported by the Polish National Agency for Strategic Partnership under +Grant No. BPI/PST/2021/1/00031/U/00001 +Bibliography +[1] Arena, P., et al. Neural Networks in Multidimensional Domains: Fundamentals and New +Trends in Modelling and Control, Springer, 1998. + +14 +BIBLIOGRAPHY +[2] Bahdanau, D., et al. Neural Machine Translation by Jointly Learning to Align and Translate; +arXiv: https://arxiv.org/abs/1409.0473. Accessed 22 September 2022. +[3] Cui, Peng, et al., editors. Graph Neural Networks: Foundations, Frontiers, and Applications, +Springer Nature Singapore, 2022. +[4] Finzi, M., et al. Generalizing Convolutional Neural Networks for Equivariance to Lie Groups +on Arbitrary Continuous Data, arXiv:https://arxiv.org/abs/2002.12880. Accessed 22 +September 2022. +[5] Goodfellow, J.J., et al.Generative Adversarial Nets, Proceedings of the International Confer- +ence on Neural Information Processing Systems (NIPS 2014), vol. 1, no. 1, 2014, pp. 2672–2680. +[6] Google Brain Team. TensorFlow library. TensorFlow.org, https://www.tensorflow.org/. Ac- +cessed 22 September 2022. +[7] Hochreiter, S., and J. Schmidhuber. Long Short-Term Memory, Neural Computation, vol. 9, +no. 8, 1997, pp. 1735–1780, https://doi.org/10.1162/neco.1997.9.8.1735. +[8] Hopfield, J.J. Neural networks and physical systems with emergent collective computational +abilities, Proceedings of the National Academy of Sciences, vol. 79, no. 8, 1982, pp. 2554–2558 +https://doi.org/10.1073/pnas.79.8.2554. +[9] Lane, Hobson, et al. Natural Language Processing in Action: Understanding, Analyzing, and +Generating Text with Python, Manning, 2019. +[10] Meta AI. PyTorch library. PyTorch.org, https://pytorch.org/. Accessed 22 September 2022. +[11] Raschka, Sebastian, and Vahid Mirjalili. Python Machine Learning: Machine Learning and +Deep Learning with Python, Scikit-learn, and TensorFlow 2, Packt Publishing, 2019. +[12] H. Ramsauer, B. Sch¨afl, J. Lehner, P. Seidl, M. Widrich, T. Adler, L. Gruber, M. Holzleitner, +M. Pavlovi´c, G.K. Sandve, V. Greiff, D. Kreil, M. Kopp, G. Klambauer, J. Brandstetter, S. +Hochreiter, Hopfield Networks is All You Need, https://arxiv.org/abs/2008.02217 +[13] Rumelhart, D.E., et al. Learning representations by back-propagating errors, Nature, vol. 323, +no. 1, 1986, pp. 533–536, https://doi.org/10.1038/323533a0. 
+[14] Sherrington, D., and S. Kirkpatrick. Solvable Model of a Spin-Glass, Phys. Rev. Lett., vol. 35, +no. 26, 1972, pp. 1792–1796, https://doi.org/10.1103/PhysRevLett.35.1792. +[15] Sutskever, +I., +et +al. +Sequence +to +Sequence +Learning +with +Neural +Networks, +arXiv:https://arxiv.org/abs/1409.3215. Accessed 22 September 2022. +[16] Tripathi, Suman Lata, et al., editors. Machine Learning Algorithms for Signal and Image +Processing, Wiley, 2022. + +Chapter 4 +Dynamical systems approach to +artificial neural networks +R.A. Kycia, A. Siemaszko +4.1 +Introduction +Data processing in Neural Networks (biological and artificial) can described as a time-dependent +phenomenon. +The mathematical tool to describe the change of a system in time is offered by +Dynamical Systems: Smooth Dynamical systems – for a continuous time parameter usually ranging +from a connected subset of R, or Discrete Dynamical Systems – discrete time steps, varying over +a subset of Z. Therefore, it is natural to ask if these complex systems can be described by the +tools offered by Dynamical Systems. Currently, there are many directions in which the disciplines +of Dynamical Systems and Artificial Neural Networks interpenetrate each other, and in this report, +we indicate some of these directions in this fast-pacing field. +4.2 +Biological Neural Networks +To understand the motivation for applying the Dynamical Systems approach to Artificial Neural +Networks (ANN), we will briefly overview the modeling of biological neural networks. This is a vast +subject of Dynamical Neuroscience (’neurodynamics’), see, e.g., [8], or Chapter 21 of [11]. +Even a single biological neuron is a very complicated electro-biochemical system. The main +focus is on modeling neuron excitations – when an electrochemical impulse passes some threshold, +then the neuron ’fires’, producing a sequence of spikes in voltage transmitted to other neurons by +interconnectors called synapses. This modeling must take into account the self-sustaining states of +inactivity and this producing spikes. In terms of Dynamical Systems, they can be modeled by limit +cycles (attracting or repelling). The standard model for describing these phenomena is a Hodgkin- +Huxley model, a four-dimensional model for cell membrane voltage, sodium and potassium densities +in a cell, and so-called leakage gating [6]. +The phenomenon of oscillation between inactive and spiking states inspired some researchers to +base the computation on such oscillatory behavior, e.g., [2, 3]. Moreover, the threshold behavior +15 + +16CHAPTER 4. DYNAMICAL SYSTEMS APPROACH TO ARTIFICIAL NEURAL NETWORKS +was adapted in the first model of a neuron – the perceptron [12]. +4.3 +Physics-motivated Neural Networks +One of the systems that can be significantly investigated using the qualitative and quantitative +methods of Dynamical Systems is the Hopfield Neural Network [7]. The system is modeled on the +crystal lattice of spins, and powerful techniques of statistical physics are accessible for solving its +parameters. They allow us to estimate the memory capacity and stability of memorized patterns. +For example, for the continuous version of the Hopfield Model, the stability can be analyzed using +a suitable Lyapunov function, e.g., Chapter 20 of [11]. This network model was developed into a +core layer in a multilayer feed-forward ANN [13]. +The other physics-inspired model on spin glass is called the Boltzmann machine [14]. In this +model also the techniques of statistical physics can be applied. 
+4.4 +Modelling Artificial Neural Networks +Feed-forward multilayer ANN dominates current practical applications. The mathematical under- +standing of their work as a whole has yet to be provided, however, some progress is made [16]. +The current trends of using Dynamical Systems theory to describe ANN focus on various direc- +tions, some of which we summarize in the following subsections. +4.4.1 +Modelling ANN work +The description of ANN using continuous Dynamical Systems is a new idea [17]. In principle, the +multilayer ANN is a discrete Dynamical System, where we have two consecutive steps - performing +a linear operation on the output from the previous step1, and applying a nonlinear function. We +can now devise the idea to model such a network by a continuous Dynamical System. The original +network is recovered by doing a discretization of the model. This approach is more flexible since +powerful mathematical techniques are at our disposal. The problem of regression using the ODE +approach can be formulated as follows [17]: Consider the differential equation +dz +dt = f(A(t), z), +z(0) = x, +(4.4.1) +where z and f are Rd-valued functions, A is a control that need to be found. The solution of this +problem z(t), under linear transformation u(x) = az(x)+b, for real parameters a ∈ Rd, b ∈ R, must +fit the data y(x), i.e., to minimize the distance ||y(x) − u(x)|| in a suitable norm. The information +about the structure of ANN is contained in the function f. The problem of the existence of control +at the level of Dynamical Systems is transferred in this approach to the question if the structure of +ANN is suitable for modeling the data. +Especially interesting in terms of Dynamical Systems are Residual Neural Networks, which can +be brought to discrete dynamical system [17], and in some cases, this system can be modeled as +the Euler scheme for integrating ODEs [9]. Moreover, the Residual Model can be rewritten as a +control problem of transport equation and then rewritten as a PDE on manifold [10]. +1The initial step is fed by the data. + +4.5. DYNAMICAL MODELS PREDICTED BY ANN +17 +4.4.2 +Modelling of learning process +The other aspect of the Dynamical System approach is the way how they model the learning +algorithm. The ANN learning algorithm aims to find a minimum2 for a loss function. This problem +is usually solved by a gradient descent (GD) method. This method in hydrodynamical limit and +using mean-field approximation [16, 5], can be converted into a gradient flow of ANN weight on a +manifold with the Wasserstein metric. This provides new mathematical tools for determining the +convergence of DG methods. +In general, for the continuous approach to Learning ANN, one obtains nonlinear parabolic PDEs, +where all tools from their theory, including optimal choice of function space, variational calculus, +finding an approximate solution, analyzing the stability and attractors, can be applied, for reference +see [18]. +Another approach to learning is the so-called Deep Equilibrium Model [1]. In this approach, +the learning is attained by finding the equilibrium of a Dynamical System that describes ANN. +4.4.3 +Neural ODEs +Another approach in modeling ANN with ODEs are Neural ODEs, the concept presented in [4]. The +idea behind the model is to make the layers continuous. Then the propagation through the network +can be described by ODE and not a difference equation. +This opens an opportunity to apply +adaptive ODE solvers for learning. 
The drawback of this approach is the limited approximation +capabilities of these architectures, as described in [19]. +4.5 +Dynamical Models predicted by ANN +The opposite direction of using ANN to model and control Dynamical Systems is currently a vast +field of research. We do not pretend to review this field and only provide a reference of review book +[15] instead. +4.6 +Conclusions +Currently, the Artificial Neural Networks and Dynamical Systems theory merge, benefiting both +disciplines. We presented some current trends in this direction, however, the full review is impossible +due to the high volume of results appearing each term. +Acknowledgments +This article has been supported by the Polish National Agency for Strategic Partnership under +Grant No. BPI/PST/2021/1/00031/U/00001. +2In an ideal situation, it should be a global minimum. + +18 +BIBLIOGRAPHY +Bibliography +[1] S. Bai, J.Z. Kolter, V. Koltun, Deep equilibrium models, Advances in Neural Information +Processing Systems 32 (2019) +[2] J. Borresen, S. Lynch, Neuronal computers, Nonlinear Anal. Theory, Meth. and Appl., 71 +2372–2376 (2009) +[3] J. +Borresen, +S. +Lynch, +Oscillatory +threshold +logic, +PLoS +ONE +7(11): +e48498. +doi:10.1371/journal.pone.0048498 (2012). +[4] R.T.Q. Chen, et al., Neural ordinary differential equations, Advances in neural information +processing systems 31 (2018) +[5] L. Chizat, F. Bach, On the global convergence of gradient descent for over- parameterized +models using optimal transport. In Advances in neural information processing systems, pages +3036–3046, (2018) +[6] A.L. Hodgkin and A.F. Huxley, A qualitative description of membrane current and its appli- +cation to conduction and excitation in nerve, J. Physiol. 117 500–544, (1952) +[7] J.J. Hopfield, Neural networks and physical systems with emergent collective computational +abilities. Proceedings of the National Academy of Sciences. 79 (8): 2554–2558 (1982) +[8] E.M. Izhikevich, Dynamical Systems in Neuroscience: The Geometry of Excitability and Burst- +ing, MIT Press, 2010 +[9] Y. Le, +A. Zhong, +Q. Li, +B. Dong, +Beyond Finite Layer Neural Networks: +Bridg- +ing Deep Architectures and Numerical Differential Equations,Proceedings of the 35th +International +Conference +on +Machine +Learning, +PMLR +80:3276–3285 (2018); +arXiv: +https://arxiv.org/abs/1710.10121 +[10] Z. Li, Z. Shi, Deep Residual Learning and PDEs on Manifold, arXiv: 1708.05115v3[cs.IT] +[11] S. Lynch, Dynamical Systems with Applications using Python, Springer 2018 +[12] W.S. McCulloch, W. Pitts, A logical calculus of the ideas immanent in nervous activity, The +Bulletin of Mathematical Biophysics, 5(4):115–133, (1943) +[13] H. Ramsauer, B. Sch¨afl, J. Lehner, P. Seidl, M. Widrich, T. Adler, L. Gruber, M. Holzleitner, +M. Pavlovi´c, G.K. Sandve, V. Greiff, D. Kreil, M. Kopp, G. Klambauer, J. Brandstetter, S. +Hochreiter, Hopfield Networks is All You Need, https://arxiv.org/abs/2008.02217 +[14] D. Sherrington, S. Kirkpatrick, Solvable Model of a Spin-Glass, Physical Review Letters, 35 +(35): 1792–1796, (1975) +[15] Y. Tiumentsev, M. Egorchev, Neural Network Modeling and Identification of Dynamical Sys- +tems, Academic Press, 2019 +[16] E. Weinan, M. Chao, W. Lei, S. Wojtowytsch, Towards a Mathematical Understanding of +Neural Network-Based Machine Learning: What We Know and What We Don’t, CSIAM Trans. +Appl. Math., 1 , 561-615, (2020) + +BIBLIOGRAPHY +19 +[17] E. Weinan, A Proposal on Machine Learning via Dynamical Systems, Commun. Math. Stat. +5:1–11 (2017) +[18] E. 
Wienan, C. Ma, L. Wu, Machine learning from a continuous viewpoint, I, Sci. China Math. +63, 2233–2266 (2020); DOI: https://doi.org/10.1007/s11425-020-1773-8 +[19] H. Zhang, X. Gao, J. Unterman, T. Arodz, Approximation Capabilities of Neural ODEs and +Invertible Residual Networks, Proceedings of the 37th International Conference on Machine +Learning, PMLR 119:11086-11095, 2020. + +20 +BIBLIOGRAPHY + +Chapter 5 +Neural networks as universal +approximators +J.M. Calabuig, Ll.M. Garc´ıa-Raffi +Since the first golden age (the 1950s and 1960s) when in 1962, Frank Rosenblatt introduced and +developed the perceptron, Artificial Neural Networks (ANNs) have gone through various stages +ranging from enthusiasm to ostracism. When we talk about ANNs, we are talking about math- +ematical tools that play an important role in approximation and classification problems. From a +mathematical point of view, a natural question that arises is whether Artificial Neural Networks +are universal approximators in the sense of mathematics. This question, which may seem trivial or +second-order in view of the applications of ANNs in applied problems, is nevertheless a central issue. +In essence, the certainty of the results achieved in practical problem solved with Artificial Neural +Networks rests on the certainty that they are universal approximators. To find the first answer to +this question we have to go back to the work of Cybenko and Hornik [1, 2] where basically it is +proved that a feed-forward Neural Networks with at least one hidden layer can approximate any con- +tinuous function assuming that certain activation functions are used (sigmoid activation function). +Since then, and as new network topologies emerged with new activation functions, an important +theoretical effort have been done in order to prove the character of universal approximators of ANN +[3, 4, 5, 6, 7, 8]. +Within the question of whether an ANN can approximate a (continuous) function there are two +issues to be addressed. On the one hand, there is the Hornik/Cybenko issue which corresponds +to the question about if some ANN can approximate a given (continuous) function to arbitrary +precision. However, neither the result nor the proof of it give any indication of how “large”ANNs +need to be to achieve a certain approximation accuracy. Then, another issue to be addressed is how +many layers and how many neurons per layer an ANN requires, that is, the approximation rates. +A distinction must be made between shallow learning and deep learning. +In [9] authors study and proof approximation results for ANN with general activation func- +tions: a two layer Neural Network with a polynomially-decaying non-sigmoidal activation function. +They extend the results for a larger class of activation functions, removing the polynomial decay +assumption. This result applies to any bounded, integrable activation function. +In [10] authors address the study of the approximation of continuous functions with very deep +21 + +22 +BIBLIOGRAPHY +networks using the activation function RELU. In this case, not narrow networks (a high number of +neurons per layer) are considered and authors prove that constant-width fully-connected networks +of depth of the order of the number of weights provide the fastest possible approximation rate. +In [11] the narrow case is addressed, that is, networks of bounded width and arbitrary depth. 
+Specially interesting is the work [12] that address the super-narrow case, that is, with only two +neurons per layer, showing that given enough layers, a super-narrow Neural Network, with two +neurons per layer, is capable to separate any separable binary dataset and if the datasets exhibit +certain type of symmetries, they are better suited for deep representation and may require only few +hidden layers to produce desired classification. +Less literature is found on the consideration of non-standard activation functions. However, this +is a field to be explored in order to obtain networks that are narrow, with a medium level of depth +and a suitable approximation rate. Note that, for example, in traditional convolutional networks +applied to the reconstruction of medical images (e.g. Nuclear Magnetic Resonance Imaging MRI), +the number of weights (neurons+layers) is usually in the order of millions. In short, these are free +parameters in our model and therefore any reduction in their number generates more robust and +simpler mathematical models. +One of the natural extensions to changing the activation function is to consider that the image +of the function is not in R but in C [20, 13]. +This is the case of complex and hypercomplex- +valued Nerural Networks. Beyond being a simple generalization of real-value activation functions, +Complex-Valued Neural Networks (CVNNs) are specially suitable to deal with modelling problems +of complex amplitude –amplitude and phase– the kind of problems that are in the core of wave +physics (electromagnetism, light, sound/ultrasounds, and matter waves). CVNNs give an important +advantage in practical applications in fields where signals are massively analyzed and processed in +time/space, frequency, and phase domains. Hyper-complex ANN as quaternion and Clifford Neural +Networks are further extension of CVNNs ([16, 15, 17, 14, 18, 19]). They seems to be specially suit- +able in color-information treatment, image reconstruction and segmentation, robotics and systems +control. The question about the character as universal approximates and the approximation rates +of CVNNs is currently the subject of investigation [21], cf. [22]. +Acknowledgement +This article has been supported by the Polish National Agency for Strategic Partnership under +Grant No. BPI/PST/2021/1/00031/U/00001 +Bibliography +[1] Cybenko, G. Approximation by superpositions of a sigmoidal function. Mathematics Of Con- +trol, Signals And Systems. 2, 303-314 (1989,12,1), doi.org/10.1007/BF02551274 +[2] Hornik, +K., +Stinchcombe, +M. +& +White, +H. +Multilayer +feedforward +net- +works +are +universal +approximators. +Neural +Networks. +2, +359-366 +(1989), +www.sciencedirect.com/science/article/pii/0893608089900208 + +BIBLIOGRAPHY +23 +[3] Leshno, M., Lin, V., Pinkus, A. & Schocken, S. Multilayer feedforward networks with a non- +polynomial activation function can approximate any function. Neural Networks. 6, 861-867 +(1993), www.sciencedirect.com/science/article/pii/S0893608005801315 +[4] Pinkus, A. Approximation theory of the MLP model in neural networks. Acta Numerica. 8 pp. +143–195 (1999) +[5] Zhou, +D. +Universality +of +deep +convolutional +neural +networks. +Ap- +plied +And +Computational +Harmonic +Analysis. +48, +787-794 +(2020), +www.sciencedirect.com/science/article/pii/S1063520318302045 +[6] Sch¨afer, A. & Zimmermann, H. Recurrent Neural Networks Are Universal Approximators. +Artificial Neural Networks – ICANN 2006. pp. 632-640 (2006) +[7] Heinecke, A., Ho, J. & Hwang, W. 
Refinement and Universal Approximation via Sparsely +Connected ReLU Convolution Nets. IEEE Signal Processing Letters. 27 pp. 1175-1179 (2020) +[8] Br¨uel Gabrielsson, R. Universal Function Approximation on Graphs. Advances In Neural In- +formation Processing Systems. 33 pp. 19762-19772 (2020) +[9] Siegel, +J. +& +Xu, +J. +Approximation +rates +for +neural +networks +with +gen- +eral +activation +functions. +Neural +Networks. +128 +pp. +313-321 +(2020), +www.sciencedirect.com/science/article/pii/S0893608020301891 +[10] Yarotsky, D. Optimal approximation of continuous functions by very deep ReLU net- +works. +Proceedings +Of +The +31st +Conference +On +Learning +Theory. +75 +pp. +639-649 +(2018,7,6),proceedings.mlr.press/v75/yarotsky18a.html +[11] Kidger, P. & Lyons, T. Universal Approximation with Deep Narrow Networks. Proceed- +ings Of Thirty Third Conference On Learning Theory. 125 pp. 2306-2327 (2020,7,9), +proceedings.mlr.press/v125/kidger20a.html +[12] Szymanski, L. & McCane, B. Deep, super-narrow neural network is a universal classifier. The +2012 International Joint Conference On Neural Networks (IJCNN). pp. 1-8 (2012) +[13] Kobayashi, +M. +Complex-valued +Hopfield +neural +networks +with +real +weights +in +synchronous +mode. +Neurocomputing. +423 +pp. +535-540 +(2021), +www.sciencedirect.com/science/article/pii/S092523122031660X +[14] Kobayashi, +M. +Bicomplex-valued +twin-hyperbolic +Hopfield +neu- +ral +networks. +Neurocomputing. +434 +pp. +203-210 +(2021), +www.sciencedirect.com/science/article/pii/S092523122032021X +[15] Kobayashi, M. Fixed points of split quaternionic hopfield neural networks. Signal Processing. +136 pp. 38-42 (2017), www.sciencedirect.com/science/article/pii/S0165168416303346, +Hypercomplex Signal Processing +[16] Kobayashi, M. Symmetric quaternionic Hopfield neural networks. Neurocomputing. 240 pp. +110-114 (2017), www.sciencedirect.com/science/article/pii/S0925231217303351 +[17] Parcollet, T., Morchid, M. & Linar`es, G. A survey of quaternion neural networks. Artificial +Intelligence Review. 53, 2957-2982 (2020,4,1), doi.org/10.1007/s10462-019-09752-1 + +24 +BIBLIOGRAPHY +[18] Vieira, G. & Valle, M. A general framework for hypercomplex-valued extreme learning ma- +chines. Journal Of Computational Mathematics And Data Science. 3 pp. 100032 (2022), +www.sciencedirect.com/science/article/pii/S2772415822000062 +[19] Da +Cunha, +´E. +& +Da +Fontoura +Costa, +L. +On +hypercomplex +networks. +Phys- +ica +A: +Statistical +Mechanics +And +Its +Applications. +591 +pp. +126714 +(2022), +www.sciencedirect.com/science/article/pii/S0378437121009298 +[20] Nitta, +T. +An +Extension +of +the +Back-Propagation +Algorithm +to +Complex +Numbers. +Neural +Networks. +10, +1391-1415 +(1997), +www.sciencedirect.com/science/article/pii/S0893608097000361 +[21] Voigtlaender, F. The universal approximation theorem for complex-valued neural networks. +ArXiv. abs/2012.03351 (2020) +[22] Vital, W., Vieira, G. & Valle, M. Extending the Universal Approximation Theorem for a Broad +Class of Hypercomplex-Valued Neural Networks. (arXiv, 2022), arxiv.org/abs/2209.02456 + +Chapter 6 +Complex and quaternionic neural +networks +B. Schneider, D. Berseghyan +6.1 +Introduction +Neural networks in the real domain have been studied for a long time and achieved promising +results in many vision tasks for recent years. However, the extensions of the neural network models +in other number fields and their potential applications are not fully-investigated yet. 
Complex numbers play an important role in practical applications and fundamental theorems in various fields of engineering, such as electromagnetics, communication, control theory, and quantum mechanics. The application of complex numbers to neural networks has recently attracted attention because they tend to improve learning ability and fit naturally with the applications mentioned above.
They allow a point in two-dimensional space to be modeled as a single entity, rather than as a pair of data items on which 2D geometrical affine operations are performed. It has been shown that a neural network equipped with the representation and operations of complex numbers achieves improved performance on geometrical affine transformations in two-dimensional space, whereas the performance of real-valued (conventional) neural networks is comparatively poor. Operations involving complex numbers can thus improve the performance of neural networks that process two-dimensional data; see, e.g., the book [1].
In the 1870s, William Kingdon Clifford introduced his geometric algebra, building on earlier work of Sir William Rowan Hamilton and Hermann Günther Grassmann. Clifford intended to describe the geometric properties of vectors, planes, and higher-dimensional objects. Most physicists encounter the algebra in the guise of the Pauli and Dirac matrix algebras of quantum theory. Many roboticists and computer graphics engineers use quaternions for 3D rotation estimation and interpolation, since it is difficult to formulate homogeneous transformations of higher-order geometric entities using a point-wise approach; they often resort to tensor calculus for multivariable calculus. Since robotics and engineering build on the developments of mathematical physics, many beliefs are automatically inherited; for instance, some physicists come away from a study of Dirac theory with the view that Clifford's algebra is inherently quantum-mechanical.
Extending neural networks to hypercomplex number systems is one such research effort. In these types of neural networks, the input, output, and internal state of a neuron (the basic computational unit) are represented by hypercomplex numbers.
Quaternion neural networks are models in which the computations of the neurons are based on quaternions, the four-dimensional extension of the imaginary numbers. Simulations show that a quaternion neural network also converges faster than a real-valued neural network on the 3-bit parity check problem. Consequently, the application of hypercomplex numbers, particularly quaternions, to neural networks has been investigated. Quaternions form a class of hypercomplex number systems, a four-dimensional extension of the imaginary numbers. One of the benefits of quaternions is that affine transformations of geometric figures in three-dimensional space (3D geometrical affine transformations), especially spatial rotations, can be represented compactly and efficiently; in recent years, quaternions have been used extensively in robotics, satellite attitude control, and computer graphics, see for example [2].
In that sense, it is very useful to employ complex numbers and quaternions, which can process two- or three-dimensional information as a single unit, as representations of neurons.
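To make the preceding remarks on quaternions and 3D rotation concrete, the following is a minimal, self-contained sketch in plain Python; the helper names and the example rotation are our own illustrative choices, not code from the cited works. It implements the Hamilton product and rotates a 3D vector by a unit quaternion, which illustrates why a rotation can be stored and composed as a single four-component entity.

```python
import math

def qmul(p, q):
    # Hamilton product of quaternions p = (p0, p1, p2, p3), q = (q0, q1, q2, q3)
    p0, p1, p2, p3 = p
    q0, q1, q2, q3 = q
    return (p0*q0 - p1*q1 - p2*q2 - p3*q3,
            p0*q1 + p1*q0 + p2*q3 - p3*q2,
            p0*q2 - p1*q3 + p2*q0 + p3*q1,
            p0*q3 + p1*q2 - p2*q1 + p3*q0)

def qconj(q):
    # Quaternionic conjugation: negate the imaginary part
    return (q[0], -q[1], -q[2], -q[3])

def rotate(v, axis, angle):
    # Rotate the 3D vector v about a unit axis by `angle`, using q v q^{-1} with a unit quaternion q
    s, c = math.sin(angle / 2), math.cos(angle / 2)
    q = (c, s * axis[0], s * axis[1], s * axis[2])
    _, x, y, z = qmul(qmul(q, (0.0, *v)), qconj(q))
    return (x, y, z)

print(rotate((1.0, 0.0, 0.0), (0.0, 0.0, 1.0), math.pi / 2))  # approximately (0, 1, 0)
```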
Indeed, it has been suggested that complex-valued and quaternionic feed-forward neural networks have a remarkable learning ability for affine transformation problems in two- or three-dimensional space. The role of neural networks in today's scientific community cannot be denied: their vast applications, from engineering to medicine, are based on continuously improving algorithms. This motivated our research group to begin working towards a mathematical basis for the field of hypercomplex neural networks, which could bring better and faster algorithms and be useful in a wide range of computations.
In the underlying mathematical theories, the choice of a system of constants plays an important role, and advancing from a theory built on real numbers to hypercomplex ones is bound to give improved algorithms, due to the rich analysis available in this field.

6.2 Elements of quaternionic analysis

In this section we briefly present the basic definitions and results of quaternionic analysis which are necessary for our purpose. For more information, we refer the reader to [8, 9].
Let $\mathbb{H}$ be the set of real quaternions, i.e., each quaternion $a$ is represented in the form $a = a_0 + a_1 i + a_2 j + a_3 k$, with $\{a_k\} \subset \mathbb{R}$, $k = 0, 1, 2, 3$, where $i, j, k$ are the quaternionic imaginary units. The basic elements define the arithmetic rules in $\mathbb{H}$, given by the relations
\[
i^2 = j^2 = k^2 = -1; \qquad ij = k = -ji; \qquad jk = i = -kj; \qquad ki = j = -ik.
\]
The quaternionic conjugate of $a = a_0 + a_1 i + a_2 j + a_3 k$ is $\bar{a} := a_0 - a_1 i - a_2 j - a_3 k$. It is easily seen that $a\bar{a} = \bar{a}a = a_0^2 + a_1^2 + a_2^2 + a_3^2$. Note that for $a, b \in \mathbb{H}$, $\overline{ab} = \bar{b}\,\bar{a}$.
We identify the space $\mathbb{C}^2$ with the set $\mathbb{H}$ of quaternions: let $(z_1, z_2) = (x_0 + i x_1, x_2 + i x_3)$ be a pair of complex numbers with imaginary unit $i$, and let $j$ be another imaginary unit such that $j^2 = -1$ and $ij + ji = 0$. In particular, for $a \in \mathbb{C}$, denoting (by abuse of notation) the complex conjugate of $a$ by $\bar{a}$, we have $aj = j\bar{a}$. The set of elements of the form $q = z_1 + z_2 j$, $z_1, z_2 \in \mathbb{C}$, endowed with component-wise addition and the associative multiplication, is then another way of describing $\mathbb{H}$. The quaternion conjugation gives $\overline{z_1 + z_2 j} = \bar{z}_1 - z_2 j$ and $q\bar{q} = \bar{q}q = |z_1|^2 + |z_2|^2$.
Let $E$ be a bounded subset of $\mathbb{R}^4 \cong \mathbb{C}^2 \cong \mathbb{C} \times \mathbb{C}$ and denote by $BC(E, \mathbb{H})$ the class of $\mathbb{H}$-valued bounded continuous functions on $E$. For $f \in BC(E, \mathbb{H})$ we define the modulus of continuity of $f$ as the non-negative function
\[
\omega_f(\delta) := \sup_{\substack{x, y \in E \\ |x - y| \le \delta}} |f(x) - f(y)|, \qquad \delta > 0.
\]
For $0 < \nu \le 1$, if
\[
\sup_{0 < \delta \le \operatorname{diam} E} \frac{\omega_f(\delta)}{\delta^{\nu}} < \infty,
\]
then $f$ is Hölder continuous with exponent $\nu$ in $E$ (Lipschitz continuous for $\nu = 1$). We denote by
\[
C^{0,\nu}(E, \mathbb{H}) := \Big\{ f \in BC(E, \mathbb{H}) : \sup_{0 < \delta \le \operatorname{diam} E} \frac{\omega_f(\delta)}{\delta^{\nu}} < \infty \Big\}
\]
the collection of Hölder continuous functions on $E$, for $0 < \nu \le 1$.
We say ([6]) that a closed set $E$ in $\mathbb{R}^4$ is an Ahlfors-David regular set (AD-regular, for short) if there exists a constant $c > 0$ such that for all $x \in E$ and $0 < r < \operatorname{diam} E$,
\[
c^{-1} r^3 \le \mathcal{H}^3\big(E \cap B(x, r)\big) \le c\, r^3,
\]
where $B(x, r)$ is the closed ball with center $x$ and radius $r$, and $\mathcal{H}^3$ is the 3-dimensional Hausdorff measure. The AD-regularity condition implies uniform positive and finite bounds on $E$ for the upper and lower density.
Moreover, we note that this condition produces a very wide class of surfaces, containing the classes of surfaces classically considered in the literature: Liapunov surfaces, smooth surfaces and Lipschitz surfaces. Finally, we remark that AD-regular sets are not always rectifiable in the sense of Federer [7].
In what follows, $\Omega \subset \mathbb{R}^4$ stands for a bounded domain with an AD-regular rectifiable boundary $\Gamma$, and we set $\Omega^+ := \Omega$ and $\Omega^- := \mathbb{R}^4 \setminus \overline{\Omega^+}$, where both open sets are assumed to be connected.
For continuously real-differentiable $\mathbb{H}$-valued functions $f = f_0 + f_1 i + f_2 j + f_3 k : \Omega \to \mathbb{H}$, the operator
\[
{}^{\psi}D := \frac{\partial}{\partial x_0} + i \frac{\partial}{\partial x_1} - j \frac{\partial}{\partial x_2} + k \frac{\partial}{\partial x_3},
\]
associated with the structural set $\psi := \{1, i, -j, k\}$, is called the Cauchy-Riemann operator; it can be written in complex form as
\[
{}^{\psi}D = 2\Big( \frac{\partial}{\partial \bar{z}_1} - j \frac{\partial}{\partial \bar{z}_2} \Big).
\]
A factorization of the Laplacian is given by
\[
{}^{\psi}D\; {}^{\bar\psi}D = {}^{\bar\psi}D\; {}^{\psi}D = \Delta_{\mathbb{H}}, \qquad \text{where } {}^{\bar\psi}D := 2\Big( \frac{\partial}{\partial z_1} + j \frac{\partial}{\partial \bar{z}_2} \Big)
\]
and $\Delta_{\mathbb{H}}[f] := \Delta_{\mathbb{R}^4}[f_0] + \Delta_{\mathbb{R}^4}[f_1]\, i + \Delta_{\mathbb{R}^4}[f_2]\, j + \Delta_{\mathbb{R}^4}[f_3]\, k$.
A function $f : \Omega \to \mathbb{H}$ is called left $\psi$-hyperholomorphic in $\Omega$ if ${}^{\psi}D[f](\xi) = 0$ for all $\xi \in \Omega$. We write
\[
{}^{\psi}M(\Omega, \mathbb{H}) := \{ f \in C^1(\Omega, \mathbb{H}) : {}^{\psi}D[f](\xi) = 0, \ \forall \xi \in \Omega \}.
\]
Under the assumption $f \in {}^{\psi}M(\Omega, \mathbb{H})$, and following arguments similar to those in [4, page 3875], we have the Cauchy integral formula
\[
\int_{\Gamma} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, f(\tau)\, d\mathcal{H}^3_{\tau} = f(t), \qquad t \in \Omega^+. \tag{6.2.1}
\]
For a survey of the theory of $\psi$-hyperholomorphic functions along classical lines we refer the reader to [12].
An easy computation shows that if $f = u + vj$ with $u = f_0 + i f_1$ and $v = f_2 + i f_3$, then
\[
{}^{\psi}D f = 0 \iff
\begin{cases}
\partial_{\bar z_1} u + \partial_{z_2} \bar v = 0, \\
\partial_{\bar z_2} u - \partial_{z_1} \bar v = 0,
\end{cases}
\]
which expresses the direct relation between $\psi$-hyperholomorphic functions and solutions of the Cimmino system.
The most important example of a $\psi$-hyperholomorphic function is
\[
K_{\psi}(q) = \frac{1}{2\pi^2}\, \frac{\bar z_1 + \bar z_2 j}{(|z_1|^2 + |z_2|^2)^2}, \qquad (z_1, z_2) \neq (0, 0),
\]
obtained by applying ${}^{\bar\psi}D$ to the fundamental solution of the Laplacian $\Delta_{\mathbb{R}^4}$. It is known as the Cauchy kernel, and it is a fundamental solution of both operators ${}^{\psi}D$ and ${}^{\bar\psi}D$.

6.3 Poincaré-Bertrand formula for $\psi$-hyperholomorphic singular integrals

The Cauchy kernel $K_{\psi}$ generates the integrals that are important for us:
\[
{}^{\psi}C_{\Gamma}[f](q) := \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, f(\xi)\, d\mathcal{H}^3_{\xi}, \qquad q \in \mathbb{R}^4 \setminus \Gamma,
\]
\[
{}^{\psi}S_{\Gamma}[f](q) := 2 \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, \big( f(\xi) - f(q) \big)\, d\mathcal{H}^3_{\xi} + f(q), \qquad q \in \Gamma,
\]
where $n_{\psi}(\xi) := n_0 + n_1 i - n_2 j + n_3 k$, with $(n_0, n_1, n_2, n_3) \in \mathbb{R}^4$ the outward unit normal vector on $\Gamma$.
By using ideas from [3], we have

Remark 6.3.1 In general, the integral
\[
\int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, d\mathcal{H}^3_{\xi}
\]
does not make sense for every $q \in \Gamma$; hence the formula
\[
\int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, \big( f(\xi) - f(q) \big)\, d\mathcal{H}^3_{\xi}
= \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, f(\xi)\, d\mathcal{H}^3_{\xi}
- \Big( \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, d\mathcal{H}^3_{\xi} \Big) f(q)
\]
is generally not valid. In the case when the singular integral
\[
2 \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, d\mathcal{H}^3_{\xi}
\]
has a finite value $\alpha(q)$ for every $q \in \Gamma$, then
\[
{}^{\psi}S_{\Gamma}[f](q) = 2 \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, f(\xi)\, d\mathcal{H}^3_{\xi} + \big( 1 - \alpha(q) \big) f(q).
\]

While the first integral is a $\psi$-hyperholomorphic version of the usual Cauchy-type integral, the second represents its singular version, whose integral has to be taken in the sense of the Cauchy principal value. In order to facilitate their usage, we present below some basic properties of the $\psi$-hyperholomorphic singular integrals, thus making our exposition self-contained.

Theorem 6.3.2 [5] Let $\Omega$ be a bounded domain in $\mathbb{R}^4$ with AD-regular boundary $\Gamma$, and let $f \in C^{0,\nu}(\Gamma, \mathbb{H})$.
Then the following limits exist: +lim +Ω±∋q→ξ∈Γ(ψCΓ[f](q)) =: ψC± +Γ [f](ξ), +moreover the following identities hold: +ψC± +Γ [f](ξ) = 1 +2[ψSΓ[f](ξ) ± f(ξ)], +(6.3.1) +for all ξ ∈ Γ. +Theorem 6.3.3 [5] If Γ is a AD-regular surface, then for f ∈ C0,ν(Γ, H), 0 < ν < 1 we have the +following formula: +ψS2 +Γ[f](ξ) = f(ξ), ξ ∈ Γ. +Lemma 6.3.4 If {t, τ} ⊂ Γ, t ̸= ξ, then +� +Γτ +Kψ(τ − t) nψ(τ) Kψ(τ − ξ) dH3 +τ = 0. +Proof. The proof of Lemma 6.3.4 is similar of the proof of Lemma 3 in [11], therefore we refer to +[11] for identical parts. +Lemma 6.3.5 Let f(ξ, τ) := f0(ξ,τ) +|ξ−τ|µ , 0 ≤ µ < 3, and f0 ∈ C0,ν(Γ × Γ, H). Then the next formula +of changing of integration order is true for all t ∈ Γ: +� +Γτ +Kψ(τ − t) nψ(τ) [f(ξ, τ) − f(τ, τ)] dH3 +τ +� +Γξ +nψ(ξ) dH3 +ξ = += +� +Γξ +� +Γτ +Kψ(τ − t) nψ(τ) [f(ξ, τ) − f(τ, τ)] dH3 +τ nψ(ξ) dH3 +ξ. + +30 +CHAPTER 6. COMPLEX AND QUATERNIONIC NEURAL NETWORKS +Proof. The proof of Lemma 6.3.5 is along the same line of the proof of Theorem 22.5 in [10]. +□ +The Poincar´e-Bertrand formula in the ψ-hyperholomorphic framework is established by our next +theorem. +Theorem 6.3.6 Let Ω be a bounded domain in R4 with AD-regular boundary Γ and let f ∈ C0,ν(Γ× +Γ, H). Then for all w ∈ Γ +� +Γz +Kψ(z − t) nψ(z) dH3 +z +� +Γξ +Kψ(ξ − z) nψ(ξ)[f(ξ, z) − f(z, t)] dH3 +ξ = += +� +Γξ +� +Γz +Kψ(z − t) nψ(z) dH3 +z Kψ(ξ − z) nψ(ξ)[f(ξ, z) − f(z, t)] dH3 +ξ + α2(t)f(t, t), +and the integrals being understood in the sense of the Cauchy principal value. +If Ω be a bounded domain in R4 with a smooth boundary Γ then α = 1 +2 and the formula reduces +to the Poincar´e-Bertrand formula (see, e.g., [11]). +Proof. Let +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ +� +Γξ +Kψ(ξ − τ) nψ(ξ) [f(ξ, τ) − f(τ, t)] dH3 +ξ = += +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ +� +Γξ +Kψ(ξ − τ) nψ(ξ) ([f(ξ, τ) − f(τ, t)] − f(τ, τ)) dH3 +ξ+ ++ +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ +� +Γξ +Kψ(ξ − τ) nψ(ξ) [f(τ, τ) − f(t, t)] dH3 +ξ+ ++ +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ +� +Γξ +Kψ(ξ − τ) nψ(ξ) f(t, t) dH3 +ξ. +In the first two quaternionic integrals on the right-hand side we can change the order of integration +by Lemma 6.3.5 we have +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ +� +Γξ +Kψ(ξ − τ) nψ(ξ) [f(ξ, τ) − f(τ, t)] dH3 +ξ = += +� +Γξ +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ Kψ(ξ − τ) nψ(ξ) ([f(ξ, τ) − f(τ, t)] − f(τ, τ)) dH3 +ξ+ ++ +� +Γξ +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ Kψ(ξ − τ) nψ(ξ) [f(τ, τ) − f(t, t)] dH3 +ξ+ ++ +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ +� +Γξ +Kψ(ξ − τ) nψ(ξ) f(t, t) dH3 +ξ = += +� +Γξ +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ Kψ(ξ − τ) nψ(ξ) [f(ξ, τ) − f(τ, t)] dH3 +ξ− +− +� +Γξ +�� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ Kψ(ξ − τ) +� +nψ(ξ) f(t, t) dH3 +ξ+ + +6.4. POINCAR´E-BERTRAND FORMULA FOR THE CAUCHY-CIMMINO SINGULAR INTEGRALS31 ++ +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ +� +Γξ +Kψ(ξ − τ) nψ(ξ) f(t, t) dH3 +ξ = +(by using Lemma 6.3.4 and the Remark 6.3.1) += +� +Γξ +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ Kψ(ξ − τ) nψ(ξ) [f(ξ, τ) − f(τ, t)] dH3 +ξ + α2(t)f(t, t). +□ +Suppose that f(ξ, τ) = f(ξ) ∈ C0,ν(Γ, H) is ψ-hyperholomorphic extension into Ω, then the +composition formula for ψ-hyperholomorphic functions can be written as: +Theorem 6.3.7 (Composition formula) Let Ω be a bounded domain in R4 with AD-regular bound- +ary Γ and let f ∈ C0,ν(Γ, H). If f(ξ) can be extended ψ-hyperholomorphically into Ω. Then for all +t ∈ Γ, +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ +� +Γξ +Kψ(ξ − τ) nψ(ξ)[f(ξ) − f(τ)] dH3 +ξ = α2(t)f(t). +(6.3.2) +Note that if +ψ ˜Sf := 2 +� +Γξ +Kψ(ξ − τ) nψ(ξ)[f(ξ) − f(τ)] dH3 +ξ, +than formula (6.3.2) means that +ψ ˜S2f = 4α2(t)f(t). +Proof. 
Since f(ξ) can be holomorphic extented into Ω, then by Theorem 6.3.2 and Remark 6.3.1 +ψC+f(z) = f(z), z ∈ Ω. +By formula (6.3.1) and Remark 6.3.1, we have +2f(z) = ψ ˜Sf(ξ) + 2(1 − α(z)) f(z), +moreover +ψ ˜S2f = ψ ˜S ψ ˜Sf = ψ ˜S[2αf] = 4α2f. +□ +6.4 +Poincar´e-Bertrand formula for the Cauchy-Cimmino sin- +gular integrals +Using the representation of the quaternionic Cauchy kernel Kψ and the normal vector nψ in the +complex form, we have: +Kψ(ξ − z) nψ(ξ) = K1(ξ, z) + K2(ξ, z)j, +(6.4.1) +with +K1(ξ, z) := +1 +2π2 +(¯ξ1 − ¯z1)(n0 + in1) + (¯ξ2 − ¯z2)(n2 + in3) +(|ξ1 − z1|2 + |ξ2 − z2|2)2 +; +(6.4.2) + +32 +CHAPTER 6. COMPLEX AND QUATERNIONIC NEURAL NETWORKS +and +K2(ξ, z) := +1 +2π2 +(¯ξ2 − ¯z2)(n0 + in1) − (¯ξ1 − ¯z1)(n2 + in3) +(|ξ1 − z1|2 + |ξ2 − z2|2)2 +, +(6.4.3) +where ξ = ξ1 + ξ2j, z = z1 + z2j. Thus, +ψCΓ[u + vj](z1, z2) = C1[u, v](z1, z2) + C2[u, v](z1, z2)j, (z1, z2) /∈ Γ, +ψSΓ[u + vj](z1, z2) = S1[u, v](z1, z2) + S2[u, v]j, (z1, z2) ∈ Γ, +where +C1[u, v](z1, z2) = +� +Γ +[(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)]u(ζ1, ζ2) +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3 +ξ1,ξ2− +− +� +Γ +[(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)]¯v(ζ1, ζ2) +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3 +ξ1,ξ2, +C2[u, v](z1, z2) = +� +Γ +[(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)]v(ζ1, ζ2) +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3 +ξ1,ξ2+ ++ +� +Γ +[(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)]¯u(ζ1, ζ2) +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3 +ξ1,ξ2. +The pair (C1, C2) of integrals for (z1, z2) ∈ C2 play the role of an analog of a Cauchy type integral +in theory of the Cimmino system of partial differential equations. +Similarly the singular Cauchy-Cimmino integral operators are defined formally as pair (S1, S2), +of the following singular integrals taken in the sense of Cauchy’s principal value +S1[u, v](z1, z2) = 2 +� +Γ +[(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)][u(ζ1, ζ2) − u(z1, z2)] +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3− +−2 +� +Γ +[(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)][¯v(ζ1, ζ2) − ¯v(z1, z2)] +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3+ ++u(z1, z2) = += 2 +� +Γ +[(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)]u(ζ1, ζ2) +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3− +−2 +� +Γ +[(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)]¯v(ζ1, ζ2) +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3+ ++(1 − α(z1, z2)) u(z1, z2), +S2[u, v](z1, z2) = 2 +� +Γ +[(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)][v(ζ1, ζ2) − v(z1, z2)] +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3+ + +6.4. POINCAR´E-BERTRAND FORMULA FOR THE CAUCHY-CIMMINO SINGULAR INTEGRALS33 ++2 +� +Γ +[(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)][¯u(ζ1, ζ2) − ¯u(z1, z2)] +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3+ ++v(z1, z2) = += 2 +� +Γ +[(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)]v(ζ1, ζ2) +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3+ ++2 +� +Γ +[(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)]¯u(ζ1, ζ2) +2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 +dH3+ ++(1 − α(z1, z2)) v(z1, z2). 
+Returning to Section 6.3 substitute (6.4.2) and (6.4.3) into Theorem 6.3.6, we have: let Ω be a +bounded domain in R4 with AD-regular boundary Γ and let (u, v) ∈ C0,ν(Γ×Γ, C)×C0,ν(Γ×Γ, C), +then for all t ∈ Γ +� +Γτ +� +Γξ +[K1(τ − t) + K2(τ − t)j] {[K1(ξ − τ) + K2(ξ − τ)j](u(ξ, τ) − u(τ, t)+ ++(v(ξ, τ) − v(τ, t))j)dH3 +ξ dH3 +τ +� += += +� +Γτ +� +Γξ +[K1(τ − t) + K2(τ − t)j] {K1(ξ − τ)(u(ξ, τ) − u(τ, t)) + K1(ξ − τ)(v(ξ, τ) − v(τ, t))j+ ++K2(ξ − τ)j(u(ξ, τ) − u(τ, t)) + K2(ξ − τ)j(v(ξ, τ) − v(τ, t))jdH3 +ξ dH3 +τ +� += += +� +Γτ +� +Γξ +{K1(τ − t) K1(ξ − τ)(u(ξ, τ) − u(τ, t))+ ++K1(τ − t) K1(ξ − τ)(v(ξ, τ) − v(τ, t))j+ ++K1(τ − t) K2(ξ − τ)j(u(ξ, τ) − u(τ, t))+ ++K1(τ − t) K2(ξ − τ)j(v(ξ, τ) − v(τ, t))j+ ++K2(τ − t) j K1(ξ − τ)(u(ξ, τ) − u(τ, t))+ ++K2(τ − t) j K1(ξ − τ)(v(ξ, τ) − v(τ, t))j+ ++K2(τ − t) j K2(ξ − τ) j(u(ξ, τ) − u(τ, t))+ ++ K2(τ − t) j K2(ξ − τ) j (v(ξ, τ) − v(τ, t)) j} dH3 +ξ dH3 +τ. +Note that +� +Γξ +� +Γτ +Kψ(τ − t) nψ(τ) dH3 +τ Kψ(ξ − τ) nψ(ξ)[f(ξ, τ) − f(τ, t)] dH3 +ξ + α2(t)f(t, t) = += +� +Γξ +� +Γτ +{K1(τ − t) K1(ξ − τ)(u(ξ, τ) − u(τ, t))+ ++K1(τ − t) K1(ξ − τ)(v(ξ, τ) − v(τ, t))j+ + +34 +CHAPTER 6. COMPLEX AND QUATERNIONIC NEURAL NETWORKS ++K1(τ − t) K2(ξ − τ)j(u(ξ, τ) − u(τ, t))+ ++K1(τ − t) K2(ξ − τ)j(v(ξ, τ) − v(τ, t))j+ ++K2(τ − t) j K1(ξ − τ)(u(ξ, τ) − u(τ, t))+ ++K2(τ − t) j K1(ξ − τ)(v(ξ, τ) − v(τ, t))j+ ++K2(τ − t) j K2(ξ − τ) j(u(ξ, τ) − u(τ, t))+ ++ K2(τ − t) j K2(ξ − τ) j (v(ξ, τ) − v(τ, t)) j} dH3 +τ dH3 +ξ+ ++α2(t)(u(t, t) + v(t, t)j). +If one separates complex coordinates into above equality, then the following equalities can be easy +obtained formulae for Cimmino system: +� +Γτ +� +Γξ +[K1(τ − t) K1(ξ − τ) (u(ξ, τ) − u(τ, t))+ ++K1(τ − t) K2(ξ − τ) j(v(ξ, τ) − v(τ, t))j+ ++K2(τ − t) jK1(ξ − τ)(v(ξ, τ) − v(τ, t))j+ ++K2(τ − t) j K2(ξ − τ) j(u(ξ, τ) − u(τ, t))] dH3 +ξ dH3 +τ = += +� +Γξ +� +Γτ +[K1(τ − t) K1(ξ − τ) (u(ξ, τ) − u(τ, t))+ ++K1(τ − t) K2(ξ − τ) j(v(ξ, τ) − v(τ, t))j+ ++K2(τ − t) j K1(ξ − τ)(v(ξ, τ) − v(τ, t))j+ ++K2(τ − t) j K2(ξ − τ) j(u(ξ, τ) − u(τ, t))] dH3 +τ dH3 +ξ+ ++α2(t) u(t, t); +and +� +Γτ +� +Γξ +[K1(τ − t) K1(ξ − τ) (v(ξ, τ) − v(τ, t))j+ ++K1(τ − t) K2(ξ − τ) j(u(ξ, τ) − u(τ, t))+ ++K2(τ − t) j K1(ξ − τ) (u(ξ, τ) − u(τ, t))+ ++K2(τ − t) j K2(ξ − τ) j(v(ξ, τ) − v(τ, t))j] dH3 +ξ dH3 +τ = += +� +Γξ +� +Γτ +[K1(τ − t) K1(ξ − τ) (v(ξ, τ) − v(τ, t))j+ ++K1(τ − t) K2(ξ − τ) j(u(ξ, τ) − u(τ, t))+ ++K2(τ − t) j K1(ξ − τ) (u(ξ, τ) − u(τ, t))+ ++K2(τ − t) j K2(ξ − τ) j(v(ξ, τ) − v(τ, t))j] dH3 +τ dH3 +ξ+ ++α2(t) v(t, t)j. + +6.4. POINCAR´E-BERTRAND FORMULA FOR THE CAUCHY-CIMMINO SINGULAR INTEGRALS35 +Here, we have +� +Γτ +� +Γξ +[K1(τ − t) K1(ξ − τ) (u(ξ, τ) − u(τ, t))− +(6.4.4) +−K1(τ − t) K2(ξ − τ)(v(ξ, τ) − v(τ, t))− +−K2(τ − t) K1(ξ − τ) (v(ξ, τ) − v(τ, t))− +−K2(τ − t) K2(ξ − τ) (u(ξ, τ) − u(τ, t))] dH3 +ξ dH3 +τ = += +� +Γξ +� +Γτ +[K1(τ − t) K1(ξ − τ) (u(ξ, τ) − u(τ, t))− +−K1(τ − t) K2(ξ − τ)(v(ξ, τ) − v(τ, t))− +−K2(τ − t) K1(ξ − τ) (v(ξ, τ) − v(τ, t))− +−K2(τ − t) K2(ξ − τ) (u(ξ, τ) − u(τ, t))] dH3 +τ dH3 +ξ+ ++α2(t)u(t, t); +and +� +Γτ +� +Γξ +[K1(τ − t) K1(ξ − τ) (v(ξ, τ) − v(τ, t))+ +(6.4.5) ++K1(τ − t) K2(ξ − τ) (u(ξ, τ) − u(τ, t))+ ++K2(τ − t) K1(ξ − τ) (u(ξ, τ) − u(τ, t))− +−K2(τ − t) K2(ξ − τ) (v(ξ, τ) − v(τ, t))] dH3 +ξ dH3 +τ = += +� +Γξ +� +Γτ +[K1(τ − t) K1(ξ − τ) (v(ξ, τ) − v(τ, t))+ ++K1(τ − t) K2(ξ − τ) (u(ξ, τ) − u(τ, t))+ ++K2(τ − t) K1(ξ − τ) (u(ξ, τ) − u(τ, t))− +−K2(τ − t) K2(ξ − τ) (v(ξ, τ) − v(τ, t))] dH3 +τ dH3 +ξ+ ++α2(t) v(t, t). 
+Let +N1[f](z) := 2 +� +Γ +K1(ξ − z) f(ξ) dH3 +ξ + (1 − α(z))f(z), ∀z ∈ Γ, +and +N2[f](z) := −2 +� +Γ +K2(ξ − z)f(ξ) dH3 +ξ + (1 − α(z))f(z), ∀z ∈ Γ. +If one separates complex coordinates in Lemma 6.3.4 we have: +� +Γξ +� +Γτ +K1(τ − z) K1(ξ − τ) u(ξ) dH3 +τ dH3 +ξ − +� +Γξ +� +Γτ +K1(τ − z) K2(ξ − τ) v(ξ) dH3 +τ dH3 +ξ− + +36 +CHAPTER 6. COMPLEX AND QUATERNIONIC NEURAL NETWORKS +− +� +Γξ +� +Γτ +K2(τ − z) K1(ξ − τ) v(ξ) dH3 +τ dH3 +ξ − +� +Γξ +� +Γτ +K2(τ − z) K2(ξ − τ) u(ξ) dH3 +τ dH3 +ξ = 0, +� +Γξ +� +Γτ +K1(τ − z) K1(ξ − τ) v(ξ) dH3 +τ dH3 +ξ + +� +Γξ +� +Γτ +K1(τ − z) K2(ξ − τ) u(ξ) dH3 +τ dH3 +ξ+ ++ +� +Γξ +� +Γτ +K2(τ − z) K1(ξ − τ) u(ξ) dH3 +τ dH3 +ξ − +� +Γξ +� +Γτ +K2(τ − z) K2(ξ − τ) v(ξ) dH3 +τ dH3 +ξ = 0. +Thus, if functions u and v depend only on ξ, then we can write: +N 2 +1 − N 2 +2 = I, +(6.4.6) +N1 N2 + N2 N1 = 0. +(6.4.7) +Remark 6.4.1 Note that N 2 +2 ̸= 0 in (6.4.6). Indeed, if N 2 +2 [f] = 0 for all f ∈ C1,ν(Γ, C). Then the +function N2[f] can be holomorphically extended from Γ into Ω+ and by the uniqueness theorem for +harmonic functions this extension is given by +F(z) = −2 +� +Γ +K2(ξ − z)f(ξ) dH3 +ξ, z ∈ Ω+. +But then ψC[f] and (6.4.1) imply that the function +Gf(z) := +� +Γ +K1(ξ − z) f(ξ) dH3 +ξ, +is holomorphic for any f ∈ C1,ν(Γ, C) which is not true. +Theorem 6.4.2 Let f ∈ C1(Ω+, C) is representable in Ω+ ⊂ C2 by +f(z) = +� +Γ +K1(ξ − z) f(ξ) dH3 +ξ, z ∈ Ω+. +Then f is holomorphic in Ω+. +Proof. Applying the Sokhotski-Plemelj formulae to f we have +f(z) = 1 +2 [N1[f](z) + f(z)], z ∈ Γ. +Thus, N 2 +1 [f] = I[f], and from (6.4.6) we have N 2 +2 [f] = 0. But by Remark 6.4.1 we have that +function F defined by +F(z) := +� +Γ +K1(ξ − z) f(ξ) dH3 +ξ +is holomorphic in Ω+ and with F | Ω+ = u we completed the proof. +□ +From Lemma 6.3.4 and [10, page 211], the term +� +Γξ +� +Γτ +K1(τ − t) K1(ξ − τ) dH3 +τ dH3 +ξ = 0 ∀t ∈ Γ. + +BIBLIOGRAPHY +37 +Then from Section 6.4, we have that +� +Γξ +� +Γτ +K2(τ − t) K2(ξ − τ) dH3 +τ dH3 +ξ = 0. +(6.4.8) +So, by using (6.4.8) and Theorem 6.3.6, for t ∈ Γ and f ∈ C0,ν(Γ × Γ, C) we have +� +Γτ +� +Γξ +K2(τ − t) K2(ξ − τ) [f(ξ, τ) − f(τ, t)] dH3 +τ dH3 +ξ = +(6.4.9) += +� +Γξ +� +Γτ +K2(τ − t) K2(ξ − τ) [f(ξ, τ) − f(τ, t)] dH3 +ξ dH3 +τ. +Comparing that last equality with (6.4.4) and (6.4.5), for f ∈ C0,ν(Γ × Γ, C) for singular integrals +for Cimmino system the structural analog of the Poin´care-Bertrand formula is true: +� +Γτ +� +Γξ +K1(τ − t) K1(ξ − τ) [f(ξ, τ) − f(τ, t)]dH3 +ξ dH3 +τ = += +� +Γξ +� +Γτ +K1(τ − t) K1(ξ − τ) [f(ξ, τ) − f(τ, t)]dH3 +τ dH3 +ξ + α2(t)f(t, t). +Acknowledgement +This article has been supported by the Polish National Agency for Strategic Partnership under +Grant No. BPI/PST/2021/1/00031/U/00001 +Bibliography +[1] Hirose, A. Complex-Valued Neural Networks - Advances and Applications. John Wiley & Sons +Inc. 2013, 304 pp. +[2] Isokawa, T., Kusakabe, T., Matsui, N., Peper, F. Quaternion Neural Network and Its Appli- +cation. In: Palade, V., Howlett, R.J., Jain, L. (eds) Knowledge-Based Intelligent Information +and Engineering Systems. KES 2003. Lecture Notes in Computer Science, vol 2774. Springer, +Berlin, Heidelberg, 2003. +[3] R. Abreu Blaya, J. Bory Reyes and B. Kats (2015): Cauchy integral and singular integral +operator over closed Jordan curves. Monatsh Math. 176: 1–15. +[4] R. Abreu Blaya, J. Bory Reyes, A. Guzm´an Ad´an and B. Schneider (2012): Boundary value +problems for the Cimmino system via quaternionic analysis. Appl. Math. Comp., 219, 3872– +3881. +[5] R. Abreu Blaya, J. Bory Reyes and B. 
Schneider (2014): On Cauchy type integrals related to the Cimmino system of partial differential equations. Operator theory, operator algebras and applications, 81-92, Oper. Theory Adv. Appl., 242, Birkhäuser/Springer, Basel.
[6] G. David and S. Semmes (1993): Analysis of and on uniformly rectifiable sets. Mathematical Surveys and Monographs 38, AMS, Providence, R.I.
[7] H. Federer (1969): Geometric Measure Theory, Grundlehren Math. Wiss. 153, Springer, New York.
[8] K. Gürlebeck and W. Sprössig (1997): Quaternionic and Clifford Calculus for Physicists and Engineers. John Wiley & Sons, England, 371 pp.
[9] V. Kravchenko and M. Shapiro (1996): Integral Representations for Spatial Models of Mathematical Physics, Pitman Res. Notes in Math. Ser., vol. 351, Longman, Harlow.
[10] A. M. Kytmanov (1995): The Bochner-Martinelli Integral and Its Applications. Birkhäuser.
[11] I. Mitelman, M. Shapiro (1994): Formulae of changing of integration order and of inversion for some multidimensional singular integrals and hypercomplex analysis, J. Nat. Geom. 5 (1), 11-27.
[12] M. Shapiro (1995): Some remarks on generalizations of the one-dimensional complex analysis: Hypercomplex approach. Functional Analytic Methods in Complex Analysis and Applications to Partial Differential Equations (Trieste, 1993). World Scientific Publ., River Edge, N.J., 379-401.

Chapter 7

Fuzzy neural networks

I. Perfiljeva, V. Novak

In the field of artificial intelligence, neuro-fuzzy refers to the combination of artificial neural networks and fuzzy logic [4].
A neuro-fuzzy system is commonly known in the literature as a fuzzy neural network (FNN) or a neuro-fuzzy system (NFS). A neuro-fuzzy system (the term used hereafter) incorporates the human reasoning style of fuzzy systems through the use of fuzzy sets and a linguistic model consisting of a set of fuzzy IF-THEN rules. The main strength of neuro-fuzzy systems is that they are universal approximators whose behaviour can, at the same time, be interpreted in terms of fuzzy IF-THEN rules [2, 3, 4].
The main specificity of neuro-fuzzy systems is the presence of two conflicting requirements in fuzzy modeling: interpretability and accuracy. In practice, one of the two requirements prevails. As a consequence, the field of study of neuro-fuzzy systems is divided into two areas: linguistic fuzzy modeling focused on interpretability, mainly the Mamdani model; and accuracy-oriented fuzzy modeling, mainly the Takagi-Sugeno-Kang (TSK) model.
A new line of research in the field of data stream mining considers the case when neuro-fuzzy systems are constantly updated with new incoming data. The system's response lies in its dynamic updates, including not only recursive adaptation of model parameters, but also dynamic evolution and reduction of model components, in order to adequately handle concept drift and keep the model "relevant" at all times. Detailed reviews of various approaches to the development of neuro-fuzzy systems can be found in [2] and [3].
A neuro-fuzzy system is represented as a special three-layer feedforward artificial neural network (ANN), where [4]
• the first layer corresponds to the input variables,
• the second layer symbolizes the fuzzy rules,
• the third layer represents the output variables,
• the fuzzy sets are encoded as (fuzzy) connection weights.
The learning procedure is constrained to ensure the semantic properties of the underlying fuzzy system. A minimal sketch of the kind of rule-based computation such a network performs is given below.
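As announced above, here is a minimal zero-order Takagi-Sugeno (TSK) inference step written in plain Python; the membership functions, the two-rule base and all constants are our own illustrative choices, not a reproduction of any specific NFS from the cited literature. It shows how fuzzy IF-THEN rules combine into a single numeric output through firing strengths and a weighted average.

```python
import numpy as np

def gauss(x, c, s):
    """Gaussian membership function with center c and width s."""
    return np.exp(-0.5 * ((x - c) / s) ** 2)

def tsk_infer(x1, x2):
    # Rule 1: IF x1 is "low" AND x2 is "low" THEN y = 0.2
    # Rule 2: IF x1 is "high" AND x2 is "high" THEN y = 0.9
    w1 = gauss(x1, c=0.0, s=1.0) * gauss(x2, c=0.0, s=1.0)   # firing strength of rule 1
    w2 = gauss(x1, c=3.0, s=1.0) * gauss(x2, c=3.0, s=1.0)   # firing strength of rule 2
    y1, y2 = 0.2, 0.9                                        # rule consequents (constants)
    return (w1 * y1 + w2 * y2) / (w1 + w2)                   # normalized weighted average

print(tsk_infer(0.5, 0.2))   # close to 0.2: rule 1 dominates
print(tsk_infer(2.8, 3.1))   # close to 0.9: rule 2 dominates
```

In an adaptive NFS the centers, widths and consequents above would be the trainable connection weights mentioned in the list.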
Both characteristics, interpretability and accuracy, become relevant when the NFS has already been successfully developed for solving a specific problem. This problem-oriented perspective exposes the limitations of NFSs modeled by artificial neural networks (ANNs) and raises the question: can NFSs be extended to the next generation of Convolutional Neural Networks (CNNs)? Below we consider the main problems specific to neural network computing technology.
The main problems solved with the help of neural networks are classification and regression. Other problems are modifications of these. For example, semantic/instance segmentation is based on pixel-wise classification, object detection is a regression on rectangle/polygon areas, time series prediction is a regression, etc.
Let us discuss and compare the capabilities of ANNs and CNNs in solving these problems [6, 9]. Both neural networks, as computational models, have a similar architecture with a common step: feature extraction. The main difference is how they transform the input. From the CNN point of view, feature extraction is a gradual process focused on data with spatial dependencies. The convolution shifts its window over the data, which leads to invariance under data translation. Convolutions gradually extract many complex and abstract features. The result of this stage is a vector of descriptive features.
On the other hand, ANN feature extraction can be interpreted as a transformation of the input space into a space more suitable for the given task, i.e., one that makes the data samples separable. Consequently, an ANN is typically used for data without spatial dependencies, such as tabular data.
The difference between ANN and CNN appears in the different models of their computational units, the neurons. The ANN neuron output $a$ is given as
\[
a = g\Big(b + \sum_i w_i x_i\Big) = g(b + w x),
\]
while the convolutional neuron output $a_{ij}$ is given as
\[
a_{ij} = g\Big(b + \sum_{m=1}^{l}\sum_{n=1}^{l} W_{m,n}\, x_{i+m,\,j+n}\Big),
\]
where $W$ is the convolutional kernel. A minimal numerical sketch of these two neuron models is given at the end of this discussion.
CNNs are currently the state-of-the-art models in all major computer vision tasks, from image classification and object detection to instance segmentation [17, 8, 9]. CNNs combine three architectural ideas: local receptive fields, to extract elementary features from images; shared weights, to extract the same set of elementary features from the entire input image and to lower computational costs; and local averaging and sub-sampling, to reduce the resolution of the feature maps.
Typically, CNNs are built as a sequence of convolutional layers and pooling layers that automatically learn higher and higher level features [5, 9]. At the end of the sequence, one or more fully connected layers are used to map the output feature map to the class scores.
This structure entails complex internal relationships, which are difficult to explain using the Mamdani or Takagi-Sugeno type fuzzy models discussed above. Fortunately, the path to explainability for CNNs is easier than for other types of NN models, since human cognitive abilities contribute to the understanding of visual data. If we agree that the interpretability of a model is something that comes from the design of the model itself, then [1]

an explainable AI is one that offers reasonable data processing details that make its operation clear or easy to understand.
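As announced above, the following is a minimal NumPy sketch of the two neuron models compared in this section; all array sizes and values are illustrative only. It contrasts a dense neuron, which activates a weighted sum of the whole input vector, with a convolutional neuron, which activates a weighted sum over a small window of a 2D input.

```python
import numpy as np

g = np.tanh          # any activation function

# Dense (fully connected) neuron: a = g(b + w . x)
x = np.array([0.5, -1.0, 2.0])
w = np.array([0.1, 0.4, -0.3])
b = 0.2
a = g(b + w @ x)

# Convolutional neuron: a_ij = g(b + sum_{m,n} W[m, n] * X[i+m, j+n])
X = np.arange(25, dtype=float).reshape(5, 5)   # a toy 5x5 input "image"
W = np.array([[0.0, 1.0], [-1.0, 0.0]])        # a 2x2 convolutional kernel (shared weights)
l = W.shape[0]
i, j = 1, 2                                    # position of this output unit (0-based indices)
a_ij = g(b + np.sum(W * X[i:i + l, j:j + l]))

print(a, a_ij)
```

Sliding the window position (i, j) over the whole of X and reusing the same kernel W produces the feature map described in the text.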
+ +BIBLIOGRAPHY +41 +With this observation in mind, we single out one particular fuzzy modeling technique, known as +fuzzy (F-)transforms, as a technique whose computational model is similar to the CNN model [14]. +It has been proven in many papers [10]–[14] that the higher degree F-transforms are univer- +sal approximators of smooth and discrete functions. The approximation on a whole domain is a +combination of locally best approximations called F-transform components. They are represented +by higher degree polynomials and parametrized by coefficients that correspond to average values +of local and nonlocal derivatives of various degrees. If the F-transform is applied to images, then +its parameters are used in regularization, edge detection, characterization of patches [15], [7], etc. +Their computation can be performed by discrete convolutions with kernels that, up to the second +degree, are similar to those widely used in image processing, namely: Gaussian, Sobel, Laplacian +[16]. Thus, we can draw an analogy with the CNN method of computation and call the parameters +of the higher degree F-transform features. Moreover, based on a clear understanding of these fea- +tures’ semantic meaning, we say that a CNN with the F-transform kernels extracts features with +a clear interpretation. In addition, the sequential application of F-transform kernels with an up to +the second degree gives average (nonlocal) derivatives of higher and higher degrees. +The following text details the neural network design supported by the theoretically proven F- +transform methodology. The LeNet-5 architecture was chosen as the prototype architecture, and a +new CNN called FTNet was compiled with kernels taken from the F-transform theory of the higher +degree. +The performance of FTNet was examined on several datasets and on them it converges faster +in terms of accuracy/loss than the baseline network, subject to the same number of steps. We +compared the F-transform kernels in the first layer before and after training. We observed that +the kernels remain unchanged. Moreover, their shapes are similar to the shapes of extracted kernel +groups from the most known CNNs: VGG16 [8], VGG19 [8], InceptionV3 [9], MobileNet [14], +ResNet [14], and AlexNet [17] as the representative examples of CNNs. +Acknowledgement +This article has been supported by the Polish National Agency for Strategic Partnership under +Grant No. BPI/PST/2021/1/00031/U/00001 +Bibliography +[1] A. B. Arrieta, N. D´ıaz-Rodr´ıguez, J. Del Ser, A. Bennetot, S. Tabik, A. Barbado, S. Garc´ıa, +S.Gil-L´opez, D. Molina, R. Benjamins, R. Chatila, F. Herrera, Explainable Artificial Intelli- +gence (XAI): Concepts, taxonomies, opportunities and challenges toward responsible AI, In- +formation Fusion, 58 (2020) 82-115. +[2] Kosko, Bart (1992). Neural Networks and Fuzzy Systems: A Dynamical Systems Approach to +Machine Intelligence. Englewood Cliffs, NJ: Prentice Hall. ISBN 0-13-611435-0. +[3] Lin, C.-T., Lee, C. S. G. (1996). Neural Fuzzy Systems: A Neuro-Fuzzy Synergism to Intelligent +Systems. Upper Saddle River, NJ: Prentice Hall. +[4] Klawonn, F., Kruse R., Nauck, D. and Borgelt, C. (2003). Neuro-Fuzzy-Systeme (Vieweg, +Wiesbaden). + +42 +BIBLIOGRAPHY +[5] E.A. Popko, I.A. Weinstein, Fuzzy logic module of convolutional neural network for handwrit- +ten digits recognition, in Journal of Physics: Conference Series 738 (2016) 012123. +[6] O. Yazdanbakhsh, S. 
Dick, A deep neuro-fuzzy network for image classification, arXiv preprint arXiv:2001.01686, 2019
[7] X. Gastaldi, Shake-shake regularization, arXiv preprint arXiv:1705.07485, 2017.
[8] J.M. Ogden, E.H. Adelson, J.R. Bergen, P.J. Burt, Pyramid-based computer graphics, RCA Eng. 30 (1985), 4-15.
[9] K. Simonyan, A. Zisserman, Very deep convolutional networks for large-scale image recognition, arXiv preprint arXiv:1409.1556, 2014.
[10] I. Perfilieva, Fuzzy transforms: Theory and applications, Fuzzy Sets and Systems, 157/8 (2006) 993-1023.
[11] Perfilieva, I., Daňková, M., Bede, B. Towards a higher degree F-transform. Fuzzy Sets and Systems 2011, vol. 180, pp. 3-19. ISSN 1063-6706.
[12] I. Perfilieva, M. Holcapek, V. Kreinovich, A new reconstruction from the F-transform components, Fuzzy Sets and Systems, 288 (2016) 3-25.
[13] P. Hurtik, V. Molek, I. Perfilieva, Novel dimensionality reduction approach for unsupervised learning on small datasets, Pattern Recognition, 103 (2020) 107291.
[14] V. Molek, I. Perfilieva, Deep Learning and Higher Degree F-Transforms: Interpretable Kernels Before and After Learning, International Journal of Computational Intelligence Systems, 13/1 (2020) 1404-1414.
[15] I. Perfilieva, P. Vlašánek, Total variation with nonlocal FT-Laplacian for patch-based inpainting, Soft Comput. 23 (2019), 1833-1841.
[16] G. Patane, Data-Driven Fuzzy Transform, IEEE Transactions on Fuzzy Systems, 30/9 (2022) 3774-3784.
[17] K.K. Pal, K.S. Sudeep, Preprocessing for image classification by convolutional neural networks, in IEEE International Conference on Recent Trends in Electronics, Information & Communication Technology (RTEICT), IEEE, Bangalore, India, 2016, pp. 1778-1781.

Chapter 8

Implementation of neural networks - what has been done and what can be done?

R.A. Kycia, P. Artiemjew

8.1 Introduction

The unprecedented abundance of Artificial Neural Network (ANN) applications observed in today's world rests on two main factors. The first is the growth of computing power driven by hardware progress, including tensor processing units. The second is software-related: the appearance of easy-to-use, high-level libraries that allow various ANN architectures to be implemented quickly. These two factors make it easy to construct even very advanced structures and have transformed the discipline from research into engineering.
There are many ANN architectures, depending on the connection structure; a summary of various approaches was presented in Chapter 2.
In real-life applications, the most common architecture is the feed-forward ANN, which consists of layers in which the output of one layer serves as the input to the next. Each layer has its specific properties.
For implementation, we focus mainly on Python, since it is a leading language in Machine Learning and its applications.
The simplest 'layer' is the perceptron of McCulloch and Pitts [1]. It divides the data space by a hyperplane (a subspace of codimension one) into two disjoint classes, corresponding to the two sides of the hyperplane. Therefore, only data that are hyperplane-separable can be distinguished. The truth table of the XOR logic gate is not hyperplane- (i.e., linearly-) separable, and this observation contributed to the first AI winter. The perceptron algorithm is easily implementable from scratch and is presented in many sources, e.g., Chapter 2 of [14]. It is also available as the ready-to-use Perceptron model in the Scikit-learn library [2].
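As a brief usage sketch (the toy dataset below is our own; only the Perceptron class itself comes from Scikit-learn), the following trains the ready-made model on the linearly separable AND truth table and then on the XOR truth table, which, as explained above, no hyperplane can separate.

```python
import numpy as np
from sklearn.linear_model import Perceptron

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

y_and = np.array([0, 0, 0, 1])   # linearly separable labels
y_xor = np.array([0, 1, 1, 0])   # not linearly separable labels

clf = Perceptron(max_iter=1000, tol=1e-3)

clf.fit(X, y_and)
print(clf.score(X, y_and))       # 1.0: a separating hyperplane exists

clf.fit(X, y_xor)
print(clf.score(X, y_xor))       # below 1.0: XOR cannot be separated by a hyperplane
```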
Currently, there is also an abundance of sources describing how to construct an ANN from scratch [9]. However, to optimize computation, a more advanced approach that uses dedicated libraries is usually needed.

8.2 TensorFlow and PyTorch

The core of today's ANN applications uses two open-source libraries¹:
• TensorFlow [3], released by Google. The current version 2.0 appeared in 2019. This library is used with the high-level API (Application Programming Interface) provided by Keras [4].
• PyTorch [5], released by Meta.
Both of them have similar capabilities and philosophies of work, although the architectures of the frameworks are slightly different, mainly because of the different algorithms used in various functionalities. Both are based on multidimensional arrays, called tensors². The tensors of both libraries can use CPU and GPU computing capabilities, including cooperation with CUDA [6], which greatly increases the speed of computation. Tensors are structures that can efficiently hold the layers of an ANN, each layer being a table of weights for its neurons.
Moreover, as discussed in the previous chapters, the backpropagation algorithm requires derivatives, and both libraries provide automatic differentiation modules (GradientTape for TensorFlow and Autograd for PyTorch). These libraries also contain various optimizers for backpropagation, as well as numerous additional features for preprocessing, postprocessing, and constructing the whole ANN architecture.
The literature on the practical use of both libraries is vast. We therefore point to two introductory-level books: [14] for TensorFlow and [15] for PyTorch.
Both libraries allow the definition of custom architectures that employ the framework functionality, e.g., automatic differentiation or GPU capabilities; a minimal sketch of such a custom layer is given at the end of this section.

8.3 Non-standard architectures

We focus on two examples, out of the variety of possibilities, that employ the above libraries to implement non-standard ANNs.
The first uses the PyTorch library to implement the Hopfield layer for a feed-forward network, see [10, 11]. The Hopfield layer appears to be quite universal and incorporates a memory layer that enhances the attention mechanism.
The second implementation we chose is an architecture based on four-dimensional hypercomplex algebras, provided in [12] using TensorFlow [13]. Colors are effectively encoded using various four-dimensional hypercomplex numbers. The approach shows exceptionally high accuracy in image classification.

¹One library that, in a sense, was a predecessor and origin of the modern libraries is Theano, developed at the University of Montreal [7, 8]. Currently, it is not popular outside of research applications.
²From the mathematical viewpoint, tensors are multilinear algebraic objects with specific transformation laws between different bases. This means that they are abstract entities independent of the choice of coordinate frame; under a particular choice of frame, they take the form of multidimensional arrays. In computer science the notion of a tensor is usually taken to be equivalent to a multidimensional array, and no specific transformation law is implied.
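As a minimal sketch of the mechanisms referred to in Sections 8.2 and 8.3 (the module below is a toy example of ours, not the Hopfield layer or the hypercomplex network cited above), the following defines a small custom PyTorch layer and lets Autograd compute the gradients used by backpropagation:

```python
import torch
import torch.nn as nn

class ScaledTanhLayer(nn.Module):
    """A toy custom layer: y = a * tanh(W x + b), with a learnable scale a."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features)
        self.scale = nn.Parameter(torch.ones(1))

    def forward(self, x):
        return self.scale * torch.tanh(self.linear(x))

layer = ScaledTanhLayer(3, 2)
x = torch.randn(4, 3)                  # a batch of 4 samples with 3 features each
loss = layer(x).pow(2).mean()          # a dummy scalar loss
loss.backward()                        # Autograd computes gradients of all parameters
print(layer.scale.grad, layer.linear.weight.grad.shape)
```

The same pattern (subclassing the framework's module type and relying on its automatic differentiation) underlies the non-standard architectures mentioned above.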
8.4 Conclusions

Artificial neural networks are a fast-developing research and engineering area. The current implementation standards revolve around two main libraries: TensorFlow with Keras, and PyTorch. Many other libraries, including research architectures, are usually extensions of these two frameworks.
It is very difficult to predict the direction of development of this discipline, including new architectures. However, a general principle for selecting the ANN architecture best suited to analysing the specific data under consideration is still missing. This is the ultimate goal of the discipline; a deeper understanding of ANNs at the mathematical level is needed first.

Acknowledgments

This article has been supported by the Polish National Agency for Strategic Partnership under Grant No. BPI/PST/2021/1/00031/U/00001.

Bibliography

[1] W.S. McCulloch, W. Pitts, A logical calculus of the ideas immanent in nervous activity, The Bulletin of Mathematical Biophysics, 5(4):115-133, (1943)
[2] sklearn.linear_model.Perceptron: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html, Accessed: 16.12.2022
[3] TensorFlow, https://www.tensorflow.org/, Accessed: 16.12.2022
[4] Keras, https://keras.io/, Accessed: 16.12.2022
[5] PyTorch, https://pytorch.org/, Accessed: 16.12.2022
[6] CUDA, https://developer.nvidia.com/cuda-toolkit, Accessed: 16.12.2022
[7] Theano, https://theano-pymc.readthedocs.io
[8] Theano Development Team, Theano: A Python framework for fast computation of mathematical expressions, arXiv: http://arxiv.org/abs/1605.02688
[9] S. Weidman, Deep Learning from Scratch: Building with Python from First Principles, O'Reilly Media, 2019
[10] H. Ramsauer, B. Schäfl, J. Lehner, P. Seidl, M. Widrich, T. Adler, L. Gruber, M. Holzleitner, M. Pavlović, G.K. Sandve, V. Greiff, D. Kreil, M. Kopp, G. Klambauer, J. Brandstetter, S. Hochreiter, Hopfield Networks is All You Need, https://arxiv.org/abs/2008.02217
[11] GitHub Hopfield network implementation, https://github.com/ml-jku/hopfield-layers, Accessed: 16.12.2022
[12] G. Vieira, M.E. Valle, Acute Lymphoblastic Leukemia Detection Using Hypercomplex-Valued Convolutional Neural Networks, arXiv: https://arxiv.org/abs/2205.13273 (2022)
[13] Hypercomplex-Valued Convolutional Neural Networks, https://github.com/mevalle/Hypercomplex-valued-Convolutional-Neural-Networks, Accessed: 16.12.2022
[14] S. Raschka, V. Mirjalili, Python Machine Learning, Packt Publishing, 3rd edition, 2019
[15] S. Raschka, V. Mirjalili, Machine Learning with PyTorch and Scikit-Learn, Packt Publishing, 1st edition, 2022
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' 9 3 Classical architecture of artificial neural networks 11 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Niemczynowicz, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Kycia, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Jaworski 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='1 Introduction .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' 11 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='2 Encoding of data .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' 11 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3 Multilayer ANN .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' 12 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='4 Other architectures .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' 13 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='5 Summary .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' 13 4 Dynamical systems approach to artificial neural networks 15 R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Kycia, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Siemaszko 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='1 Introduction .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' 15 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='2 Biological Neural Networks .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' 15 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3 Physics-motivated Neural Networks .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'}
Contents (continued):
4.4 Modelling Artificial Neural Networks . . . 16
4.4.1 Modelling ANN work . . . 16
4.4.2 Modelling of learning process . . . 17
4.4.3 Neural ODEs . . . 17
4.5 Dynamical Models predicted by ANN . . . 17
4.6 Conclusions . . . 17
5 Neural networks as universal approximators . . . 21
J.M. Calabuig, Ll.M. García-Raffi
6 Complex and quaternionic neural networks . . . 25
B. Schneider, D. Berseghyan
6.1 Introduction . . . 25
6.2 Elements of quaternionic analysis . . . 26
6.3 Poincaré-Bertrand formula for ψ-hyperholomorphic singular integrals . . . 28
6.4 Poincaré-Bertrand formula for the Cauchy-Cimmino singular integrals . . . 31
7 Fuzzy neural networks . . . 39
I. Perfiljeva, V. Novak
8 Implementation of neural networks - what has been done and what can be done? . . . 43
R.A. Kycia, P. Artiemjew
8.1 Introduction . . . 43
8.2 TensorFlow and PyTorch . . . 44
8.3 Non-standard architectures . . . 44
8.4 Conclusions . . . 45
Chapter 1: Introduction

This short report reviews the current state of research and methodology on theoretical and practical aspects of Artificial Neural Networks (ANN). It was prepared to gather the state-of-the-art knowledge needed to construct complex, hypercomplex and fuzzy neural networks. The report reflects the individual interests of the authors and is by no means a comprehensive review of the ANN discipline; given the rapid development of the field, a detailed review would require a considerable number of pages and is beyond the scope of this report. The report is an outcome of the Project Meeting [1] at the University of Warmia and Mazury in Olsztyn, Poland, organized in September 2022.

The contributors of the report are (in order of appearance):
A. Niemczynowicz, UWM Olsztyn
R.A. Kycia, CUT Cracow & MUNI Brno
M. Jaworski, CUT Cracow
A. Siemaszko, UWM Olsztyn
J.M. Calabuig, UPV Valencia
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Garc´ıa-Raffi, UPV Valencia B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Schneider, UO Ostrava D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Berseghyan, UO Ostrava I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Perfiljeva, UO Ostrava V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Novak, UO Ostrava P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Artiemjew, UWM Olsztyn 1Project: ’The Strategic Research Partnership for the mathematical aspects of complex, hypercomplex and fuzzy neural networks’ 5 6 CHAPTER 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' INTRODUCTION Acknowledgement The project has been supported by the Polish National Agency for Strategic Partnership under Grant No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' BPI/PST/2021/1/00031/U/00001.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Chapter 2 Biological inspiration of artificial neural networks R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Kycia, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Niemczynowicz, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Jaworski 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='1 Introduction The abilities of the human brain always inspired scientists to mimic its abilities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' There are plenty of functionalities offered by this tissue.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' We can mention a few: Pattern recognition - recognition of objects, sounds, smells, touch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Classification - distinguish similar objects.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Generation - digesting the input data and generating new output: movement, sound.' 
The structure of the brain at microscopic scale was not revealed until the invention of the microscope and the discovery of microorganisms by Van Leeuwenhoek in 1676. The identification of neuron cells as the basic building blocks of neural tissue also has a long history that evolved together with the ideas of how neural tissue works. The neuron theory, the so-called Neuron Doctrine, was proposed by Santiago Ramón y Cajal (1852-1934). Since then, detailed studies of the biological and electrochemical properties of neurons have been performed. There are many different kinds of neurons, depending on which part of the nervous system we are analyzing. The complexity of the problem arises from the complex structure of neurons and their mutual interactions. The mathematical description of the neuron in terms of, e.g., dynamical systems has a long history [8] and has grown into the discipline of computational neurobiology [16]. This also inspired computer scientists to use structures similar to neurons and nervous systems for computational tasks. In this section we present the history of these early attempts. The first attempt was the model of a single neuron, as discussed below.

2.2 Perceptron

The mathematical ideas behind the connection of neurons were presented in [9], and the first implementation of an artificial neuron, called the perceptron, was designed at the Cornell Aeronautical Laboratory by F. Rosenblatt as an electrical circuit and described in the report [14]. The appendix of that report also provides the mathematical equations for the perceptron. In the simplest version the perceptron is able to distinguish two sets of data that can be separated by a linear hyperplane. In mathematical terms, the input consists of an n-dimensional vector of real numbers Xi = [x1, ..., xn] and an associated feature that can be coded into a number fi. The perceptron is characterized by a vector of n weights (real numbers) w = [w1, ..., wn] and an additional number w0 called the bias unit. In order to make the computations more uniform, the input vector is extended to Xi = [1, x1, ..., xn] and the weight vector to W = [w0, w1, ..., wn]. The next element of the perceptron is a decision function, which can be the Heaviside-type step function θ(x) = 1 if x > 0, θ(0) = 0, and θ(x) = −1 if x < 0. The data are then classified with respect to the side of the hyperplane given by the equation WX = 0, by the function y = θ(WX). The position of the hyperplane (the weights W) is chosen so as to minimize the discrepancy between the features fi and the corresponding outputs yi = θ(WXi). An implementation of the perceptron can be found in every standard machine learning book, e.g., [12].

At the very core of the perceptron idea lies the fact that effective classification is possible only if the data are linearly separable, i.e., separable by a hyperplane. This was proved in [11] and is related to the XOR problem. Since the XOR gate can be regarded as a function that takes two bits as input and returns one bit, the inputs of the gate can be treated as input data for the perceptron and the outputs of the XOR function as the features. These data are not linearly separable, and therefore a perceptron cannot separate the feature 0 from 1 by a hyperplane (in fact, a line in the plane). The ideas in that book were influential enough to initiate the so-called (first) AI winter, the period around the 1980s when the perceptron idea was put on hold in favor of other AI architectures.
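The learning rule sketched above fits in a few lines of code. The following is a minimal sketch in Python/NumPy of the classical perceptron update; the learning rate, number of epochs, toy data set, and the treatment of θ(0) are illustrative assumptions and are not taken from the report or from [14].

```python
import numpy as np

def train_perceptron(X, f, lr=0.1, epochs=20):
    """Classical perceptron learning rule.
    X: (m, n) array of inputs x_i; f: (m,) array of features coded as +1/-1.
    Returns the extended weight vector W = [w0, w1, ..., wn]."""
    # Extend every input with a leading 1 so that w0 plays the role of the bias unit.
    Xe = np.hstack([np.ones((X.shape[0], 1)), X])
    W = np.zeros(Xe.shape[1])
    for _ in range(epochs):
        for x_i, f_i in zip(Xe, f):
            y_i = 1 if W @ x_i > 0 else -1   # decision function (theta(0) taken as -1 here)
            W += lr * (f_i - y_i) * x_i      # shift the hyperplane only on mistakes
    return W

# Toy, linearly separable data: the logical AND gate with outputs coded as +1/-1.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
f = np.array([-1, -1, -1, 1])
print(train_perceptron(X, f))
```

For the linearly separable toy data above the loop settles after a few epochs; for XOR-like labels it would never settle, which is exactly the limitation discussed next.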
2.3 Multilayer neural networks and backpropagation algorithm

The problem of data that are not linearly separable was shown to be solvable by connecting a few perceptrons into a network, called a neural network, and by learning through back-propagating the error from the output through the network while updating the weights; this solves the XOR problem. These ideas were described in [15] and started a revival of interest in artificial neurons. However, due to the insufficient computing power needed for the backpropagation algorithm, practical work stalled up to the 2010s; this period is called the second AI winter. The appearance of connected artificial neurons gave rise to a new paradigm of computing, called connectionism, in which computation is encoded in the topology (the graph of connections between neurons in an ANN); see [1].

2.4 Current state of development

Since the 2010s, interest in deep learning, i.e., the use of neural networks for practical computational tasks, has been reviving. This situation is due to an increasing number of examples where neural networks handle data with better performance than classical algorithms. The progress is also driven by the strong interest in deep learning from the biggest IT companies. The most common architectures used in applications are multilayer neural networks, where neurons are grouped in layers and each layer's output is an input to another layer. The construction of multilayer networks was greatly simplified by the appearance of two powerful open-source libraries:
- TensorFlow (Google Brain Team [5]) - donated by Google; for common use it is accessed through the Keras frontend [2].
- PyTorch (Meta AI [10]) - donated by Adam Paszke, Sam Gross, Soumith Chintala, and Gregory Chanan.
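To illustrate both points, namely that a single hidden layer trained by backpropagation solves the XOR problem and that such networks can be assembled in a few lines with the Keras frontend of TensorFlow, a minimal sketch follows. The layer sizes, activations, and training settings are arbitrary illustrative choices, not prescriptions from the report.

```python
import numpy as np
import tensorflow as tf

# The XOR data: not linearly separable, so a single perceptron fails,
# but one hidden layer trained by backpropagation suffices.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype="float32")
y = np.array([[0], [1], [1], [0]], dtype="float32")

model = tf.keras.Sequential([
    tf.keras.Input(shape=(2,)),
    tf.keras.layers.Dense(4, activation="tanh"),     # hidden layer
    tf.keras.layers.Dense(1, activation="sigmoid"),  # output layer
])
model.compile(optimizer="adam", loss="binary_crossentropy")
model.fit(X, y, epochs=2000, verbose=0)              # backpropagation of the error
print(model.predict(X, verbose=0).round())           # expected: 0, 1, 1, 0
```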
These two libraries have an enormous infrastructure built around them and are considered an industrial standard for deep learning.

As for the theoretical description of neural networks, the insight into how they work is expressed in the so-called Universal Approximation Theorems. The first was proved by G. Cybenko in 1989 [3] for the sigmoidal activation function. It was soon realized that the approximation properties of neural networks rest on the multilayer (feedforward) architecture [7], [6]. Roughly stated, the Universal Approximation Theorem says that, for a specific topology (e.g., arbitrary width and bounded depth), the output functions of feedforward neural networks are dense in the space of continuous functions on a compact space equipped with the supremum norm. Recent research in this direction focuses on estimating the optimal width and depth of layers to obtain the best approximation properties; see, e.g., [13] for further references. There are many unresolved issues concerning which functions can be approximated by an ANN and the properties of learning algorithms; they are summarized in the review article [4]. As pointed out there, at the current state of understanding there are still many unknowns; however, the big picture is starting to emerge.
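For concreteness, Cybenko's version of the theorem can be written out as follows; this is a standard formulation added here for reference, with notation introduced on the spot rather than taken from the report. Let \sigma be a continuous sigmoidal function and I_n = [0,1]^n the unit cube. Then the finite sums

    G(x) = \sum_{j=1}^{N} \alpha_j \, \sigma\!\left(w_j^{T} x + b_j\right), \qquad \alpha_j, b_j \in \mathbb{R}, \; w_j \in \mathbb{R}^{n},

are dense in C(I_n): for every f \in C(I_n) and every \varepsilon > 0 there exists a sum G of this form with \sup_{x \in I_n} |f(x) - G(x)| < \varepsilon.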
2.5 Summary

Currently, the progress in deep learning is motivated by numerous applications, starting from computer vision, natural language processing and self-driving cars, and ending with the generation of multimedia (pictures, sounds) or the design of pharmaceutics. The development is pushed by big industry, which has access to the great computing power needed to run the learning algorithms. However, theoretical description, improvement of algorithms, or construction of nonstandard (e.g., non-multilayer) architectures is still possible within limited computing resources. As of 2022 it is still an active and promising area of research.

Acknowledgements
This article has been supported by the Polish National Agency for Strategic Partnership under Grant No. BPI/PST/2021/1/00031/U/00001.

Bibliography

[1] Buckner, Cameron, and James Garson. Connectionism (Stanford Encyclopedia of Philosophy). Stanford Encyclopedia of Philosophy, 18 May 1997, https://plato.stanford.edu/entries/connectionism/. Accessed 22 September 2022.
[2] Chollet, F. Keras library. Keras: the Python deep learning API, https://keras.io/. Accessed 22 September 2022.
[3] Cybenko, G. Approximation by superpositions of a sigmoidal function, Math. Control Signal Systems, vol. 2, no. 1, 1989, pp. 303–314. https://doi.org/10.1007/BF02551274.
[4] Weinan, E., et al. Towards a Mathematical Understanding of Neural Network-Based Machine Learning: what we know and what we don't, unpublished, vol. 1, no. 1, 2020, p. 56. https://arxiv.org/abs/2009.10713.
[5] Google Brain Team. TensorFlow library. TensorFlow.org, 2015, https://www.tensorflow.org/. Accessed 22 September 2022.
[6] Hornik, K. Approximation capabilities of multilayer feedforward networks, Neural Networks, vol. 4, no. 2, 1991, pp. 251-257. https://doi.org/10.1016/0893-6080(91)90009-T.
[7] Hornik, K., et al. Multilayer feedforward networks are universal approximators, Neural Networks, vol. 2, no. 5, 1989, pp. 359-366. https://doi.org/10.1016/0893-6080(89)90020-8.
[8] Izhikevich, Eugene M. Dynamical Systems in Neuroscience: The Geometry of Excitability and Bursting, Penguin Random House LLC, 2010.
[9] McCulloch, W.S., and W. Pitts. A logical calculus of the ideas immanent in nervous activity, Bulletin of Mathematical Biophysics, vol. 5, no. 1, 1943, pp. 115–133. https://doi.org/10.1007/BF02478259.
[10] Meta AI. PyTorch library. PyTorch.org, https://pytorch.org/. Accessed 22 September 2022.
[11] Minsky, M., and S.A. Papert. Perceptrons: An Introduction to Computational Geometry, MIT Press, 1988.
[12] Mirjalili, Vahid, and Sebastian Raschka. Python Machine Learning: Machine Learning and Deep Learning with Python, Scikit-learn, and TensorFlow 2, Packt Publishing, 2019.
[13] Park, Sejun, et al. Minimum Width for Universal Approximation, vol. 1, no. 1, 2020, p. 29. https://arxiv.org/abs/2006.08859.
[14] Rosenblatt, Frank. The Perceptron – a perceiving and recognizing automaton, Report 85-460-1, Cornell Aeronautical Laboratory, 1957.
[15] Rumelhart, D., et al. Learning representations by back-propagating errors, Nature, vol. 323, no. 1, 1986, pp. 533–536. https://doi.org/10.1038/323533a0.
[16] Trappenberg, Thomas. Fundamentals of Computational Neuroscience, OUP Oxford, 2010.

Chapter 3
Classical architecture of artificial neural networks
A. Niemczynowicz, R.A. Kycia, M. Jaworski

3.1 Introduction

Artificial Neural Networks (ANN) and the Deep Learning discipline are currently among the most active fields of research in computer science. This activity is also inspired by a large demand from the IT business for new solutions and architectures that are suitable for solving new problems, or for solving more efficiently problems that are currently handled by classical algorithms. Apart from the various architectures, there is the issue of how to encode data to make them acceptable as input to an ANN. The chapter is organized as follows: in the next section we review standard input data for ANNs. Then we review typical structures of multilayer ANNs, which are currently the most used architectures. Finally, we list some nonstandard architectures.
3.2 Encoding of data

There are various types of data in the world. Many of them have standard ways of being encoded into a form suitable for ANN processing. We provide an example, and by no means exhaustive, list of data types:
- Multidimensional numerical data - transformable to vector or matrix form; they are described in various machine learning books, e.g., [11].
- Images - transformable to a matrix representation; see, e.g., [16].
- Text data - transformable to vectors of words, e.g., bag-of-words or TF-IDF vectors, or transformers based on neural networks; see, e.g., [9] (a small sketch follows this list).
- Graph data - represented as, e.g., an incidence matrix; see, e.g., [3].
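As a small illustration of the text-data case above, the sketch below turns a toy corpus into bag-of-words and TF-IDF vectors using scikit-learn; the corpus and all settings are invented for illustration and are not taken from [9] or [12].

```python
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

corpus = [
    "neural networks approximate functions",
    "fuzzy and hypercomplex neural networks",
    "graph data need a different encoding",
]

# Bag of words: each document becomes a vector of word counts.
bow = CountVectorizer()
X_bow = bow.fit_transform(corpus)      # sparse matrix of shape (3, vocabulary size)
print(bow.get_feature_names_out())
print(X_bow.toarray())

# TF-IDF: the counts are re-weighted by how specific a word is to a document.
tfidf = TfidfVectorizer()
X_tfidf = tfidf.fit_transform(corpus)
print(X_tfidf.toarray().round(2))
```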
3.3 Multilayer ANN

The typical architecture used in industry is a (multilayer) feedforward architecture that consists of layers of neurons, where the output of one layer is the input of another. The acceleration in this direction was heavily induced by the appearance of two open-source libraries that allow the construction of complex ANN architectures. These libraries are:
- TensorFlow ([6]) - donated by Google.
- PyTorch ([10]) - donated by Adam Paszke, Sam Gross, Soumith Chintala, and Gregory Chanan.

The general nomenclature in such architectures is as follows:
- Input layer - the layer that gets the input data.
- Output layer - the layer that returns the result of the processing.
- Hidden layers - all layers between the input and output layers.

Within these frameworks, various multilayer architectures and processing capabilities are possible; they are outlined below.

Fully connected multilayer ANN - a network where all neurons in a layer are connected with all neurons in the neighboring layers. The advantage of this architecture is that all neurons 'see' the whole output of the preceding layer. The main disadvantage of this simple architecture is that the number of connections grows enormously with the number of neurons.
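The nomenclature above maps directly onto how such a network is declared in code. Below is a minimal sketch of a fully connected multilayer ANN written with the Keras frontend of TensorFlow; the input dimension, layer widths, activations, and loss are arbitrary illustrative choices.

```python
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.Input(shape=(16,)),                     # input layer: gets the input data
    tf.keras.layers.Dense(32, activation="relu"),    # hidden layer 1
    tf.keras.layers.Dense(32, activation="relu"),    # hidden layer 2
    tf.keras.layers.Dense(3, activation="softmax"),  # output layer: result of processing
])
model.compile(optimizer="adam", loss="categorical_crossentropy")
model.summary()   # every neuron is connected to all neurons of the neighboring layers
```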
Convolutional ANN (CNN) - convolution is a mathematical operation that connects neighboring input data (words when processing text, pixels when processing images) to feed neurons with more complete yet local information. This makes sense when the data can be treated as elements of a topological space with some notion of closeness that represents real objects: e.g., a group of neighboring pixels can represent a dog or a bird, and the context of a sentence is represented by words usually in close proximity. Convolution in general is a tool for grouping 'close' data together at the input and, moreover, for providing some notion of group invariance. In typical applications the convolution is realized by kernels that are discrete versions of translation-invariant functions; however, the general idea is related to equivariance with respect to a group action, see [4]. In a typical CNN architecture, the first few layers are convolutional layers that combine data and, as a result, reduce their dimensionality.
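A small CNN of the kind just described could be sketched as follows; the input size (28x28 grayscale images), the number of filters, and the kernel sizes are invented for illustration.

```python
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.Input(shape=(28, 28, 1)),                # e.g. small grayscale images
    # Convolutional layers: each 3x3 kernel combines neighboring pixels,
    # giving local, translation-invariant features.
    tf.keras.layers.Conv2D(16, (3, 3), activation="relu"),
    tf.keras.layers.MaxPooling2D((2, 2)),             # reduces spatial dimensionality
    tf.keras.layers.Conv2D(32, (3, 3), activation="relu"),
    tf.keras.layers.MaxPooling2D((2, 2)),
    # The reduced representation is then passed to fully connected layers.
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
model.summary()
```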
Recurrent Neural Networks (RNN) [13] - these networks can be described as a discrete dynamical system with feedback. The network is fed by a sequence of data together with the output (of the hidden layers) from the previous step. This kind of "recursive processing" allows the network to see correlations between data from different steps; therefore such networks are well suited for text processing or time series prediction. The big drawback of this architecture is the complicated learning process: since learning is iterative, the gradient used in the backpropagation algorithm is multiplied many times, which can make it extremely small or blow it up due to numerical manipulations. This is the so-called vanishing and exploding gradient problem; these are typical problems of gradient-based learning algorithms when the number of layers increases. Hopfield neural networks are a special kind of RNN.

Long Short-Term Memory ANN (LSTM) [7] - a network where each neuron has its own memory unit. The processing can be represented as a sequence of steps, and the input from the previous steps is used to modify the output values by means of the memory. LSTMs can be used for processing data where connections between different portions of the data are important.
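A minimal recurrent model for sequence data (e.g., short texts encoded as integer word indices) might be sketched as follows; the vocabulary size, sequence length, and layer widths are assumptions made for illustration. Replacing the LSTM layer with tf.keras.layers.SimpleRNN gives the plain recurrent variant described above.

```python
import tensorflow as tf

vocab_size, seq_len = 10000, 50   # assumed vocabulary size and sequence length

model = tf.keras.Sequential([
    tf.keras.Input(shape=(seq_len,)),
    tf.keras.layers.Embedding(vocab_size, 32),      # integer word indices -> vectors
    # The LSTM cell carries a memory state from step to step, which mitigates
    # the vanishing/exploding gradient problem of plain RNNs.
    tf.keras.layers.LSTM(64),
    tf.keras.layers.Dense(1, activation="sigmoid"), # e.g. binary classification
])
model.compile(optimizer="adam", loss="binary_crossentropy")
model.summary()
```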
Encoder-Decoder architecture [15] - this more abstract architecture consists of two neural networks, one for encoding the data and the second for decoding them; the overall output is the output of the decoder. This architecture is designed for processing sequences, e.g., translating from one language to another. When processing large volumes of data (e.g., text), the decoder can lose track of the main purpose of the processing, and therefore the attention mechanism was invented [2].

Generative Adversarial Network (GAN) [5] - an architecture consisting of two networks, G (the generative model) and D (the discriminative model). They are trained in tandem, where D estimates the probability that a sample comes from the training data rather than from G.

This ends our non-exhaustive overview of architectures used in typical industrial applications. In the next section we present some other architectures that are used on a smaller scale or in research on ANNs.

3.4 Other architectures

We can distinguish:
- Hopfield neural networks [8], [12] - modeled on a physical system of spins on a lattice. Due to this similarity, statistical physics methods can be widely applied to this architecture (a toy sketch follows this list).
- Boltzmann machines [14] - another spin-based approach to neural networks.
- 'Algebraic Neural Networks' - under this title we collect the typical multilayer architectures where computation is done using algebras other than the real numbers, e.g., complex numbers, various Clifford algebras, and hypercomplex algebras. This is currently a vast field of theoretical and practical research; an introduction to research in this direction is presented in [1].
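For the Hopfield networks mentioned first in the list above, a toy sketch of the classical construction (Hebbian storage of +1/-1 patterns and asynchronous updates) is given below; the stored patterns and the update schedule are invented for illustration and the code is not taken from [8] or [12].

```python
import numpy as np

rng = np.random.default_rng(0)

# Store binary (+1/-1) patterns with the Hebbian rule: W = sum_p x_p x_p^T, zero diagonal.
patterns = np.array([[1, -1, 1, -1, 1, -1, 1, -1],
                     [1, 1, 1, 1, -1, -1, -1, -1]])
W = sum(np.outer(p, p) for p in patterns).astype(float)
np.fill_diagonal(W, 0.0)

# Start from a corrupted version of the first pattern and update "spins" asynchronously.
state = patterns[0].copy()
state[:2] *= -1                      # flip two entries
for _ in range(5):                   # a few sweeps suffice for this toy example
    for i in rng.permutation(len(state)):
        state[i] = 1 if W[i] @ state >= 0 else -1

print(state)                         # should recover the stored pattern [1, -1, 1, ...]
```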
3.5 Summary

Due to the many and still growing number of applications, ANNs are an active and extremely promising research area. Therefore every summary is burdened with incompleteness and the risk of quickly becoming outdated. In this chapter, a general overview of current ANN architectures was presented, with short characteristics.

Acknowledgement

This article has been supported by the Polish National Agency for Strategic Partnership under Grant No. BPI/PST/2021/1/00031/U/00001.

Bibliography

[1] Arena, P., et al. Neural Networks in Multidimensional Domains: Fundamentals and New Trends in Modelling and Control, Springer, 1998.
[2] Bahdanau, D., et al. Neural Machine Translation by Jointly Learning to Align and Translate, arXiv: https://arxiv.org/abs/1409.0473. Accessed 22 September 2022.
[3] Cui, Peng, et al., editors. Graph Neural Networks: Foundations, Frontiers, and Applications, Springer Nature Singapore, 2022.
[4] Finzi, M., et al. Generalizing Convolutional Neural Networks for Equivariance to Lie Groups on Arbitrary Continuous Data, arXiv: https://arxiv.org/abs/2002.12880. Accessed 22 September 2022.
[5] Goodfellow, I. J., et al. Generative Adversarial Nets, Proceedings of the International Conference on Neural Information Processing Systems (NIPS 2014), vol. 1, no. 1, 2014, pp. 2672–2680.
[6] Google Brain Team. TensorFlow library. TensorFlow.org, https://www.tensorflow.org/. Accessed 22 September 2022.
[7] Hochreiter, S., and J. Schmidhuber. Long Short-Term Memory, Neural Computation, vol. 9, no. 8, 1997, pp. 1735–1780, https://doi.org/10.1162/neco.1997.9.8.1735.
[8] Hopfield, J. J. Neural networks and physical systems with emergent collective computational abilities, Proceedings of the National Academy of Sciences, vol. 79, no. 8, 1982, pp. 2554–2558, https://doi.org/10.1073/pnas.79.8.2554.
[9] Lane, Hobson, et al. Natural Language Processing in Action: Understanding, Analyzing, and Generating Text with Python, Manning, 2019.
[10] Meta AI. PyTorch library. PyTorch.org, https://pytorch.org/. Accessed 22 September 2022.
[11] Raschka, Sebastian, and Vahid Mirjalili. Python Machine Learning: Machine Learning and Deep Learning with Python, Scikit-learn, and TensorFlow 2, Packt Publishing, 2019.
[12] Ramsauer, H., et al. Hopfield Networks is All You Need, arXiv: https://arxiv.org/abs/2008.02217.
[13] Rumelhart, D. E., et al. Learning representations by back-propagating errors, Nature, vol. 323, 1986, pp. 533–536, https://doi.org/10.1038/323533a0.
[14] Sherrington, D., and S. Kirkpatrick. Solvable Model of a Spin-Glass, Phys. Rev. Lett., vol. 35, no. 26, 1975, pp. 1792–1796, https://doi.org/10.1103/PhysRevLett.35.1792.
[15] Sutskever, I., et al. Sequence to Sequence Learning with Neural Networks, arXiv: https://arxiv.org/abs/1409.3215. Accessed 22 September 2022.
[16] Tripathi, Suman Lata, et al., editors. Machine Learning Algorithms for Signal and Image Processing, Wiley, 2022.

Chapter 4
Dynamical systems approach to artificial neural networks
R. A. Kycia, A. Siemaszko

4.1 Introduction

Data processing in Neural Networks (biological and artificial) can be described as a time-dependent phenomenon. The mathematical tools to describe the change of a system in time are offered by Dynamical Systems: Smooth Dynamical Systems, with a continuous time parameter usually ranging over a connected subset of R, or Discrete Dynamical Systems, with discrete time steps varying over a subset of Z. Therefore, it is natural to ask if these complex systems can be described by the tools offered by Dynamical Systems. Currently, there are many directions in which the disciplines of Dynamical Systems and Artificial Neural Networks intertwine, and in this report we indicate some of these directions in this fast-paced field.

4.2 Biological Neural Networks

To understand the motivation for applying the Dynamical Systems approach to Artificial Neural Networks (ANN), we will briefly overview the modeling of biological neural networks.
This is a vast subject of Dynamical Neuroscience ('neurodynamics'); see, e.g., [8] or Chapter 21 of [11]. Even a single biological neuron is a very complicated electro-biochemical system. The main focus is on modeling neuron excitations: when an electrochemical impulse passes some threshold, the neuron 'fires', producing a sequence of voltage spikes transmitted to other neurons by interconnectors called synapses. This modeling must account both for the self-sustaining state of inactivity and for the production of spikes. In terms of Dynamical Systems, these can be modeled by limit cycles (attracting or repelling). The standard model for describing these phenomena is the Hodgkin-Huxley model, a four-dimensional model for the cell membrane voltage, the sodium and potassium densities in the cell, and so-called leakage gating [6]. The phenomenon of oscillation between inactive and spiking states inspired some researchers to base computation on such oscillatory behavior, e.g., [2, 3]. Moreover, the threshold behavior was adopted in the first mathematical model of a neuron [12].
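As a small numerical illustration of spiking seen through Dynamical Systems, the following sketch integrates the FitzHugh-Nagumo model, a standard two-dimensional reduction in the same spirit as the four-dimensional Hodgkin-Huxley model mentioned above; the parameter values and the explicit Euler integrator are illustrative assumptions.

```python
# FitzHugh-Nagumo model: periodic firing appears as a limit cycle of a 2D system.
import numpy as np

a, b_par, eps, I = 0.7, 0.8, 0.08, 0.5                # illustrative parameters, I = input current
dt, steps = 0.01, 20000

v, w = -1.0, 1.0                                      # membrane potential and recovery variable
trace = np.empty(steps)
for k in range(steps):
    dv = v - v**3 / 3.0 - w + I                       # fast voltage dynamics
    dw = eps * (v + a - b_par * w)                    # slow recovery dynamics
    v, w = v + dt * dv, w + dt * dw                   # explicit Euler step
    trace[k] = v

spikes = np.sum((trace[1:] > 1.0) & (trace[:-1] <= 1.0))  # upward threshold crossings
print(f"voltage crossed the firing threshold {spikes} times")
```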
4.3 Physics-motivated Neural Networks

One of the systems that can be investigated to a significant extent using the qualitative and quantitative methods of Dynamical Systems is the Hopfield Neural Network [7]. The system is modeled on a crystal lattice of spins, and powerful techniques of statistical physics can be applied to it. They allow us to estimate the memory capacity and the stability of memorized patterns. For example, for the continuous version of the Hopfield model, stability can be analyzed using a suitable Lyapunov function; see, e.g., Chapter 20 of [11]. This network model was developed into a core layer in a multilayer feed-forward ANN [13]. The other physics-inspired model, based on a spin glass, is the Boltzmann machine [14]. In this model, too, the techniques of statistical physics can be applied.

4.4 Modelling Artificial Neural Networks

Feed-forward multilayer ANNs dominate current practical applications. A mathematical understanding of how they work as a whole has yet to be achieved; however, some progress has been made [16]. The current trends in using Dynamical Systems theory to describe ANNs focus on various directions, some of which we summarize in the following subsections.

4.4.1 Modelling how ANNs work

The description of ANNs using continuous Dynamical Systems is a new idea [17]. In principle, a multilayer ANN is a discrete Dynamical System in which two steps alternate: performing a linear operation on the output of the previous step (the initial step is fed by the data), and applying a nonlinear function.
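A minimal numerical sketch of this two-step picture, with illustrative layer widths and a tanh nonlinearity (both assumed here rather than taken from the text), treats the layer index as discrete time:

```python
# A multilayer feed-forward map viewed as a discrete dynamical system:
# one time step = one linear map followed by a nonlinearity.
import numpy as np

rng = np.random.default_rng(1)
widths = [4, 8, 8, 2]                                 # illustrative layer widths
layers = [(rng.standard_normal((m, n)) * 0.5, np.zeros(m))
          for n, m in zip(widths[:-1], widths[1:])]

def forward(x):
    states = [x]
    for W, b in layers:                               # discrete "time" = layer index
        x = np.tanh(W @ x + b)                        # linear step, then nonlinear step
        states.append(x)
    return states

trajectory = forward(rng.standard_normal(widths[0]))  # the initial step is fed by the data
print([s.shape for s in trajectory])
```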
This suggests the idea of modeling such a network by a continuous Dynamical System. The original network is recovered by discretizing the model. This approach is more flexible, since powerful mathematical techniques are then at our disposal. The problem of regression using the ODE approach can be formulated as follows [17]: consider the differential equation

dz/dt = f(A(t), z),   z(0) = x,   (4.4.1)

where z and f are R^d-valued functions and A is a control that needs to be found. The solution z of this problem, under the linear transformation u(x) = az(x) + b with parameters a ∈ R^d and b ∈ R, must fit the data y(x), i.e., it must minimize the distance ||y(x) − u(x)|| in a suitable norm. The information about the structure of the ANN is contained in the function f. The problem of the existence of a control at the level of Dynamical Systems is thus transferred to the question of whether the structure of the ANN is suitable for modeling the data. Especially interesting in terms of Dynamical Systems are Residual Neural Networks, which can be cast as a discrete dynamical system [17]; in some cases this system can be viewed as an Euler scheme for integrating ODEs [9]. Moreover, the residual model can be rewritten as a control problem for a transport equation and then as a PDE on a manifold [10].
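The following minimal sketch illustrates the control formulation (4.4.1) and the residual-network / Euler-scheme connection noted above; the Euler discretization, the particular right-hand side f, and all dimensions are illustrative assumptions.

```python
# Euler-discretized version of (4.4.1): integrate dz/dt = f(A(t), z) from z(0) = x
# and read out u(x) = a.z + b. Each Euler step plays the role of a residual block.
import numpy as np

d, steps, h = 3, 20, 0.05                             # state dimension, Euler steps, step size
rng = np.random.default_rng(2)
A = rng.standard_normal((steps, d, d)) * 0.3          # piecewise-constant control A(t)
a, b = rng.standard_normal(d), 0.0                    # readout parameters

def f(A_k, z):
    return np.tanh(A_k @ z)                           # one possible right-hand side

def u(x):
    z = x                                             # z(0) = x
    for k in range(steps):
        z = z + h * f(A[k], z)                        # Euler step == residual block
    return a @ z + b                                  # linear readout u(x)

x = rng.standard_normal(d)
print(u(x))                                           # training would tune A, a, b to fit y(x)
```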
4.4.2 Modelling of the learning process

Another aspect of the Dynamical Systems approach is the way it models the learning algorithm. The ANN learning algorithm aims to find a minimum (ideally a global minimum) of a loss function. This problem is usually solved by a gradient descent (GD) method. In the hydrodynamical limit and using a mean-field approximation [16, 5], this method can be converted into a gradient flow of the ANN weights on a manifold with the Wasserstein metric. This provides new mathematical tools for studying the convergence of GD methods. In general, the continuous approach to ANN learning leads to nonlinear parabolic PDEs, to which all the tools of their theory can be applied, including the optimal choice of function space, variational calculus, the construction of approximate solutions, and the analysis of stability and attractors; for a reference see [18]. Another approach to learning is the so-called Deep Equilibrium Model [1]. In this approach, learning is attained by finding the equilibrium of a Dynamical System that describes the ANN.
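The link between gradient descent and a continuous-time flow can be illustrated with a minimal sketch; the quadratic toy loss below is an assumption made purely for illustration.

```python
# Gradient descent as the explicit-Euler discretization of the gradient flow
# d(theta)/dt = -grad L(theta), here for a toy quadratic loss L = 0.5 theta^T H theta.
import numpy as np

H = np.array([[3.0, 1.0], [1.0, 2.0]])               # a toy positive-definite Hessian
grad = lambda theta: H @ theta                       # gradient of the quadratic loss

def gradient_descent(theta, lr=0.1, steps=50):
    for _ in range(steps):
        theta = theta - lr * grad(theta)             # Euler step of the gradient flow
    return theta

def gradient_flow(theta, dt=0.001, t_end=5.0):
    for _ in range(int(t_end / dt)):
        theta = theta - dt * grad(theta)             # fine discretization ~ continuous flow
    return theta

theta0 = np.array([1.0, -2.0])
print(gradient_descent(theta0.copy()), gradient_flow(theta0.copy()))  # both approach the minimum at 0
```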
4.4.3 Neural ODEs

Another approach to modeling ANNs with ODEs is Neural ODEs, a concept presented in [4]. The idea behind the model is to make the layers continuous. The propagation through the network can then be described by an ODE rather than a difference equation. This opens up the opportunity to apply adaptive ODE solvers for learning. The drawback of this approach is the limited approximation capability of these architectures, as described in [19].
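A minimal sketch of the forward pass of such a model is shown below; the use of SciPy's adaptive RK45 solver and the particular parameterized vector field are illustrative assumptions, not the construction of [4].

```python
# Neural-ODE-style forward pass: the "network" is a continuous-time state z(t)
# driven by a parameterized vector field, and inference is an adaptive ODE solve.
import numpy as np
from scipy.integrate import solve_ivp

rng = np.random.default_rng(3)
W1 = rng.standard_normal((8, 3)) * 0.5
W2 = rng.standard_normal((3, 8)) * 0.5

def vector_field(t, z):
    # a small learnable map playing the role of f(z, t, theta)
    return W2 @ np.tanh(W1 @ z)

def forward(x, t_end=1.0):
    sol = solve_ivp(vector_field, (0.0, t_end), x, rtol=1e-5, atol=1e-7)  # adaptive solver
    return sol.y[:, -1]                              # z(T) is the network output

print(forward(np.array([1.0, 0.0, -1.0])))
```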
4.5 Dynamical Models predicted by ANN

The opposite direction, using ANNs to model and control Dynamical Systems, is currently a vast field of research. We do not attempt to review this field here and only point to the review book [15] instead.

4.6 Conclusions

Currently, Artificial Neural Network theory and Dynamical Systems theory are merging, to the benefit of both disciplines. We presented some current trends in this direction; however, a full review is impossible due to the high volume of new results constantly appearing.

Acknowledgments

This article has been supported by the Polish National Agency for Strategic Partnership under Grant No. BPI/PST/2021/1/00031/U/00001.

Bibliography

[1] S. Bai, J. Z. Kolter, V. Koltun, Deep equilibrium models, Advances in Neural Information Processing Systems 32 (2019).
[2] J. Borresen, S. Lynch, Neuronal computers, Nonlinear Anal. Theory, Meth. and Appl., 71, 2372–2376 (2009).
[3] J. Borresen, S. Lynch, Oscillatory threshold logic, PLoS ONE 7(11): e48498, doi:10.1371/journal.pone.0048498 (2012).
[4] R. T. Q. Chen, et al., Neural ordinary differential equations, Advances in Neural Information Processing Systems 31 (2018).
[5] L. Chizat, F. Bach, On the global convergence of gradient descent for over-parameterized models using optimal transport, in Advances in Neural Information Processing Systems, pages 3036–3046 (2018).
[6] A. L. Hodgkin and A. F. Huxley, A quantitative description of membrane current and its application to conduction and excitation in nerve, J. Physiol. 117, 500–544 (1952).
[7] J. J. Hopfield, Neural networks and physical systems with emergent collective computational abilities, Proceedings of the National Academy of Sciences, 79 (8): 2554–2558 (1982).
[8] E. M. Izhikevich, Dynamical Systems in Neuroscience: The Geometry of Excitability and Bursting, MIT Press, 2010.
[9] Y. Lu, A. Zhong, Q. Li, B. Dong, Beyond Finite Layer Neural Networks: Bridging Deep Architectures and Numerical Differential Equations, Proceedings of the 35th International Conference on Machine Learning, PMLR 80:3276–3285 (2018); arXiv: https://arxiv.org/abs/1710.10121.
[10] Z. Li, Z. Shi, Deep Residual Learning and PDEs on Manifold, arXiv: 1708.05115v3 [cs.IT].
[11] S. Lynch, Dynamical Systems with Applications using Python, Springer, 2018.
[12] W. S. McCulloch, W. Pitts, A logical calculus of the ideas immanent in nervous activity, The Bulletin of Mathematical Biophysics, 5(4):115–133 (1943).
[13] H. Ramsauer, et al., Hopfield Networks is All You Need, arXiv: https://arxiv.org/abs/2008.02217.
[14] D. Sherrington, S. Kirkpatrick, Solvable Model of a Spin-Glass, Physical Review Letters, 35: 1792–1796 (1975).
[15] Y. Tiumentsev, M. Egorchev, Neural Network Modeling and Identification of Dynamical Systems, Academic Press, 2019.
[16] E. Weinan, M. Chao, W. Lei, S. Wojtowytsch, Towards a Mathematical Understanding of Neural Network-Based Machine Learning: What We Know and What We Don't, CSIAM Trans. Appl. Math., 1, 561–615 (2020).
[17] E. Weinan, A Proposal on Machine Learning via Dynamical Systems, Commun. Math. Stat. 5:1–11 (2017).
[18] E. Weinan, C. Ma, L. Wu, Machine learning from a continuous viewpoint, I, Sci. China Math. 63, 2233–2266 (2020); DOI: https://doi.org/10.1007/s11425-020-1773-8.
[19] H. Zhang, X. Gao, J. Unterman, T. Arodz, Approximation Capabilities of Neural ODEs and Invertible Residual Networks, Proceedings of the 37th International Conference on Machine Learning, PMLR 119:11086–11095, 2020.

Chapter 5
Neural networks as universal approximators
J. M. Calabuig, Ll. M. García-Raffi

Since the first golden age (the 1950s and 1960s), when, in 1962, Frank Rosenblatt introduced and developed the perceptron, Artificial Neural Networks (ANNs) have gone through various stages ranging from enthusiasm to ostracism. When we talk about ANNs, we are talking about mathematical tools that play an important role in approximation and classification problems. From a mathematical point of view, a natural question that arises is whether Artificial Neural Networks are universal approximators in the mathematical sense. This question, which may seem trivial or of secondary importance in view of the applications of ANNs to applied problems, is nevertheless a central issue. In essence, the certainty of the results achieved in practical problems solved with Artificial Neural Networks rests on the certainty that they are universal approximators. To find the first answer to this question we have to go back to the work of Cybenko and Hornik [1, 2], where it is essentially proved that a feed-forward Neural Network with at least one hidden layer can approximate any continuous function, assuming that certain activation functions are used (e.g., the sigmoid activation function).
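The form of the approximator in [1, 2], a single hidden layer of sigmoids combined linearly, can be illustrated numerically with the following sketch; this is not Cybenko's construction, and the random inner weights with a least-squares fit of the output weights are assumptions made only to keep the example short.

```python
# Numerical illustration of a one-hidden-layer sigmoid approximator
# N(x) = sum_i c_i * sigma(w_i * x + b_i) fitted to a continuous target.
import numpy as np

sigma = lambda t: 1.0 / (1.0 + np.exp(-t))            # sigmoid activation
rng = np.random.default_rng(4)

x = np.linspace(-3.0, 3.0, 400)
target = np.sin(2.0 * x)                              # the continuous function to approximate

n_hidden = 50
w = rng.standard_normal(n_hidden) * 3.0               # random inner weights
b = rng.standard_normal(n_hidden) * 3.0               # random inner biases
Phi = sigma(np.outer(x, w) + b)                       # hidden-layer outputs, shape (400, 50)
c, *_ = np.linalg.lstsq(Phi, target, rcond=None)      # fit only the output weights

error = np.max(np.abs(Phi @ c - target))
print(f"max |f(x) - N(x)| over the grid: {error:.3f}")  # shrinks as n_hidden grows
```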
Since then, as new network topologies with new activation functions have emerged, an important theoretical effort has been made to prove the universal approximation property of ANNs [3, 4, 5, 6, 7, 8]. Within the question of whether an ANN can approximate a (continuous) function there are two issues to be addressed. On the one hand, there is the Hornik/Cybenko issue, which corresponds to the question of whether some ANN can approximate a given (continuous) function to arbitrary precision. However, neither the result nor its proof gives any indication of how "large" ANNs need to be to achieve a certain approximation accuracy. So another issue to be addressed is how many layers and how many neurons per layer an ANN requires, that is, the approximation rates. A distinction must be made between shallow learning and deep learning. In [9] the authors study and prove approximation results for ANNs with general activation functions: a two-layer Neural Network with a polynomially decaying non-sigmoidal activation function. They then extend the results to a larger class of activation functions, removing the polynomial decay assumption; this result applies to any bounded, integrable activation function. In [10] the authors address the approximation of continuous functions with very deep networks using the ReLU activation function. In this case, networks that are not narrow (i.e., with a high number of neurons per layer) are considered, and the authors prove that constant-width fully-connected networks whose depth is of the order of the number of weights provide the fastest possible approximation rate. In [11] the narrow case is addressed, that is, networks of bounded width and arbitrary depth.
Especially interesting is the work [12], which addresses the super-narrow case, that is, networks with only two neurons per layer. It shows that, given enough layers, a super-narrow Neural Network with two neurons per layer is capable of separating any separable binary dataset, and that if the dataset exhibits certain types of symmetries it is better suited for deep representation and may require only a few hidden layers to produce the desired classification. Less literature is found on the consideration of non-standard activation functions. However, this is a field to be explored in order to obtain networks that are narrow, with a medium level of depth and a suitable approximation rate. Note that, for example, in traditional convolutional networks applied to the reconstruction of medical images (e.g. Magnetic Resonance Imaging, MRI), the number of weights (neurons + layers) is usually on the order of millions. In short, these are free parameters in our model, and therefore any reduction in their number generates more robust and simpler mathematical models. One of the natural extensions when changing the activation function is to consider that the image of the function is not in R but in C [20, 13]. This is the case of complex- and hypercomplex-valued Neural Networks. Beyond being a simple generalization of real-valued activation functions, Complex-Valued Neural Networks (CVNNs) are especially suitable for modelling problems of complex amplitude (amplitude and phase), the kind of problems that are at the core of wave physics (electromagnetism, light, sound/ultrasound, and matter waves). CVNNs give an important advantage in practical applications in fields where signals are massively analyzed and processed in the time/space, frequency, and phase domains. Hypercomplex ANNs, such as quaternion and Clifford Neural Networks, are further extensions of CVNNs [16, 15, 17, 14, 18, 19].
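As a deliberately minimal illustration of what a CVNN building block can look like (one possible choice, not a construction from the works cited above), the sketch below implements a complex-valued dense layer in NumPy together with a split-type activation that squashes the modulus and preserves the phase; the layer sizes and the activation are assumptions made only for this example.

import numpy as np

rng = np.random.default_rng(0)

def complex_dense(x, W, b):
    """Affine map with complex weights: (batch, d_in) complex -> (batch, d_out) complex."""
    return x @ W + b

def mod_tanh(z):
    """Split-type activation: squash the modulus with tanh, keep the phase."""
    return np.tanh(np.abs(z)) * np.exp(1j * np.angle(z))

d_in, d_out, batch = 4, 3, 2
W = (rng.normal(size=(d_in, d_out)) + 1j * rng.normal(size=(d_in, d_out))) / np.sqrt(d_in)
b = np.zeros(d_out, dtype=complex)

# complex inputs carry amplitude and phase in a single entity
x = rng.normal(size=(batch, d_in)) + 1j * rng.normal(size=(batch, d_in))
y = mod_tanh(complex_dense(x, W, b))
print(y.shape, y.dtype)   # (2, 3) complex128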
These hypercomplex networks seem to be especially suitable for color-information treatment, image reconstruction and segmentation, robotics, and systems control. The question about their character as universal approximators and the approximation rates of CVNNs is currently the subject of investigation [21], cf. [22].

Acknowledgement

This article has been supported by the Polish National Agency for Strategic Partnership under Grant No. BPI/PST/2021/1/00031/U/00001.

Bibliography

[1] Cybenko, G. Approximation by superpositions of a sigmoidal function. Mathematics of Control, Signals and Systems 2, 303-314 (1989), doi.org/10.1007/BF02551274
[2] Hornik, K., Stinchcombe, M. & White, H. Multilayer feedforward networks are universal approximators. Neural Networks 2, 359-366 (1989), www.sciencedirect.com/science/article/pii/0893608089900208
[3] Leshno, M., Lin, V., Pinkus, A. & Schocken, S. Multilayer feedforward networks with a non-polynomial activation function can approximate any function. Neural Networks 6, 861-867 (1993), www.sciencedirect.com/science/article/pii/S0893608005801315
[4] Pinkus, A. Approximation theory of the MLP model in neural networks. Acta Numerica 8, 143-195 (1999)
[5] Zhou, D. Universality of deep convolutional neural networks. Applied and Computational Harmonic Analysis 48, 787-794 (2020), www.sciencedirect.com/science/article/pii/S1063520318302045
[6] Schäfer, A. & Zimmermann, H. Recurrent Neural Networks Are Universal Approximators. Artificial Neural Networks - ICANN 2006, 632-640 (2006)
[7] Heinecke, A., Ho, J. & Hwang, W. Refinement and Universal Approximation via Sparsely Connected ReLU Convolution Nets. IEEE Signal Processing Letters 27, 1175-1179 (2020)
[8] Brüel Gabrielsson, R. Universal Function Approximation on Graphs. Advances in Neural Information Processing Systems 33, 19762-19772 (2020)
[9] Siegel, J. & Xu, J. Approximation rates for neural networks with general activation functions. Neural Networks 128, 313-321 (2020), www.sciencedirect.com/science/article/pii/S0893608020301891
[10] Yarotsky, D. Optimal approximation of continuous functions by very deep ReLU networks. Proceedings of the 31st Conference on Learning Theory 75, 639-649 (2018), proceedings.mlr.press/v75/yarotsky18a.html
[11] Kidger, P. & Lyons, T. Universal Approximation with Deep Narrow Networks. Proceedings of the Thirty Third Conference on Learning Theory 125, 2306-2327 (2020), proceedings.mlr.press/v125/kidger20a.html
[12] Szymanski, L. & McCane, B. Deep, super-narrow neural network is a universal classifier. The 2012 International Joint Conference on Neural Networks (IJCNN), 1-8 (2012)
[13] Kobayashi, M. Complex-valued Hopfield neural networks with real weights in synchronous mode. Neurocomputing 423, 535-540 (2021), www.sciencedirect.com/science/article/pii/S092523122031660X
[14] Kobayashi, M. Bicomplex-valued twin-hyperbolic Hopfield neural networks. Neurocomputing 434, 203-210 (2021), www.sciencedirect.com/science/article/pii/S092523122032021X
[15] Kobayashi, M. Fixed points of split quaternionic Hopfield neural networks. Signal Processing 136, 38-42 (2017), www.sciencedirect.com/science/article/pii/S0165168416303346 (Hypercomplex Signal Processing)
[16] Kobayashi, M. Symmetric quaternionic Hopfield neural networks. Neurocomputing 240, 110-114 (2017), www.sciencedirect.com/science/article/pii/S0925231217303351
[17] Parcollet, T., Morchid, M. & Linarès, G. A survey of quaternion neural networks. Artificial Intelligence Review 53, 2957-2982 (2020), doi.org/10.1007/s10462-019-09752-1
[18] Vieira, G. & Valle, M. A general framework for hypercomplex-valued extreme learning machines. Journal of Computational Mathematics and Data Science 3, 100032 (2022), www.sciencedirect.com/science/article/pii/S2772415822000062
[19] Da Cunha, É. & Da Fontoura Costa, L. On hypercomplex networks. Physica A: Statistical Mechanics and its Applications 591, 126714 (2022), www.sciencedirect.com/science/article/pii/S0378437121009298
[20] Nitta, T. An Extension of the Back-Propagation Algorithm to Complex Numbers. Neural Networks 10, 1391-1415 (1997), www.sciencedirect.com/science/article/pii/S0893608097000361
[21] Voigtlaender, F. The universal approximation theorem for complex-valued neural networks. ArXiv abs/2012.03351 (2020)
[22] Vital, W., Vieira, G. & Valle, M. Extending the Universal Approximation Theorem for a Broad Class of Hypercomplex-Valued Neural Networks. arXiv (2022), arxiv.org/abs/2209.02456

Chapter 6

Complex and quaternionic neural networks

B. Schneider, D. Berseghyan

6.1 Introduction

Neural networks in the real domain have been studied for a long time and have achieved promising results in many vision tasks in recent years. However, extensions of neural network models to other number fields and their potential applications have not yet been fully investigated. Complex numbers play an important role in practical applications and fundamental theorems in various fields of engineering such as electromagnetics, communication, control theory, and quantum mechanics. The application of complex numbers to neural networks has recently attracted attention because they tend to improve the learning ability and fit the above-mentioned applications.
They enable the modeling of a point in two-dimensional space as a single entity, rather than as a set of two data items on which 2D geometrical affine operations are performed. It has been shown that a neural network with the representation and operations of complex numbers gives improved performance on geometrical affine transformations in two-dimensional space, whereas the performance of real-valued (conventional) neural networks is comparatively poor. Operations involving complex numbers can therefore improve the performance of neural networks for processing two-dimensional data; see, e.g., the book [1]. In the 1870s, William Kingdon Clifford introduced his geometric algebra, building on the earlier works of Sir William Rowan Hamilton and Hermann Günther Grassmann. Clifford intended to describe the geometric properties of vectors, planes, and higher-dimensional objects. Most physicists encounter the algebra in the guise of the Pauli and Dirac matrix algebras of quantum theory. Many roboticists and computer graphics engineers use quaternions for 3D rotation estimation and interpolation, as it is too difficult to formulate homogeneous transformations of higher-order geometric entities using a point-wise approach; they often resort to tensor calculus for multivariable calculus. Since robotics and engineering make use of the developments of mathematical physics, many beliefs are automatically inherited; for instance, some physicists come away from a study of Dirac theory with the view that Clifford's algebra is inherently quantum-mechanical.
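Returning to the two-dimensional remark at the beginning of this passage, the point can be made explicit in a few lines: multiplying a point encoded as the complex number x + iy by r e^{i\theta} rotates it by \theta and scales it by r, so a single complex weight already realizes a rotation-plus-scaling that a real-valued layer would need a 2x2 weight matrix to express. A minimal check:

import numpy as np

p = 1.0 + 0.0j                      # the point (1, 0) encoded as a complex number
w = 0.5 * np.exp(1j * np.pi / 2)    # one complex "weight": rotate by 90 degrees, scale by 0.5
q = w * p
print(q.real, q.imag)               # approximately (0.0, 0.5): the rotated and scaled point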
The extension of neural networks to hypercomplex number systems is one such research effort. In these types of neural networks, the input, output, and internal state of a neuron, the basic computational unit, are represented by hypercomplex numbers. Quaternion neural networks are models in which the computations of the neurons are based on quaternions, the four-dimensional equivalents of imaginary numbers. As simulations show, the quaternion neural network also performs better than a real-valued neural network in terms of convergence speed on the 3-bit parity check problem. Consequently, the application of hypercomplex numbers, particularly quaternions, to neural networks has been investigated. Quaternions are a class of hypercomplex number systems, a four-dimensional extension of imaginary numbers. One of the benefits of quaternions is that affine transformations of geometric figures in three-dimensional space (3D geometrical affine transformations), especially spatial rotations, can be represented compactly and efficiently; in recent years, quaternions have been used extensively in the fields of robotics, satellite control, and computer graphics, see for example [2]. In that sense, it is thought to be very useful to employ complex numbers and quaternions, which can process two- or three-dimensional information as a single unit, as representations of neurons. In fact, it is suggested that complex-valued and quaternionic feed-forward neural networks have a remarkable learning ability for affine transformation problems in two- or three-dimensional space. The role of Neural Networks in today's scientific community cannot be denied; their vast applications, from engineering to medicine, are based on continuously improving algorithms.
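As a worked illustration of the compact rotation representation mentioned above (a standard textbook computation, not taken from [2]), a rotation by an angle $\theta$ about a unit axis $u$ is encoded by the unit quaternion $q = \cos(\theta/2) + \sin(\theta/2)(u_1 i + u_2 j + u_3 k)$, and a point $p = p_1 i + p_2 j + p_3 k$ is rotated via $p \mapsto q\, p\, \bar{q}$. For a $90^\circ$ rotation about the $k$-axis applied to the point $i$:
$$q = \tfrac{\sqrt{2}}{2}(1 + k), \qquad q\, i\, \bar{q} = \tfrac{1}{2}(1 + k)\, i\, (1 - k) = \tfrac{1}{2}(i + j)(1 - k) = \tfrac{1}{2}(i + j - ik - jk) = \tfrac{1}{2}(i + j + j - i) = j,$$
so the x-axis unit vector is sent to the y-axis unit vector, using nothing but the multiplication rules recalled in the next section.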
These considerations motivated our research group to begin working towards the creation of a mathematical basis for the field of Hypercomplex Neural Networks, which could bring better, faster algorithms and be useful in a wide range of computations. In the underlying mathematical theories, the choice of a system of constants plays an important role, and advancing from a theory built on real numbers to hypercomplex ones is bound to give improved algorithms, due to the rich analysis of the field.

6.2 Elements of quaternionic analysis

In this section we briefly present the basic definitions and results of quaternionic analysis which are necessary for our purpose. For more information, we refer the reader to [8, 9]. Let $\mathbb{H}$ be the set of real quaternions, i.e., each quaternion $a$ is represented in the form $a = a_0 + a_1 i + a_2 j + a_3 k$, with $\{a_k\} \subset \mathbb{R}$, $k = 0, 1, 2, 3$, where $i, j, k$ are the quaternionic imaginary units. The basic elements define the arithmetic rules in $\mathbb{H}$, which are given by the following relations:
$$i^2 = j^2 = k^2 = -1; \qquad ij = k = -ji; \qquad jk = i = -kj; \qquad ki = j = -ik.$$
The quaternionic conjugate of $a = a_0 + a_1 i + a_2 j + a_3 k$ is given by $\bar{a} := a_0 - a_1 i - a_2 j - a_3 k$. It is easily seen that $a\bar{a} = \bar{a}a = a_0^2 + a_1^2 + a_2^2 + a_3^2$. Note that for $a, b \in \mathbb{H}$, $\overline{ab} = \bar{b}\,\bar{a}$.
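The rules above are easy to check mechanically; the short sketch below (an illustrative helper, not part of the text) implements the Hamilton product and conjugation on coefficient 4-tuples and verifies $ij = k = -ji$, the identity $a\bar{a} = a_0^2 + a_1^2 + a_2^2 + a_3^2$, and the reversal law $\overline{ab} = \bar{b}\,\bar{a}$ on random quaternions.

import numpy as np

def qmul(a, b):
    """Hamilton product of quaternions given as (a0, a1, a2, a3) coefficient arrays."""
    a0, a1, a2, a3 = a
    b0, b1, b2, b3 = b
    return np.array([
        a0*b0 - a1*b1 - a2*b2 - a3*b3,   # real part
        a0*b1 + a1*b0 + a2*b3 - a3*b2,   # i part
        a0*b2 - a1*b3 + a2*b0 + a3*b1,   # j part
        a0*b3 + a1*b2 - a2*b1 + a3*b0,   # k part
    ])

def qconj(a):
    return np.array([a[0], -a[1], -a[2], -a[3]])

i, j, k = np.eye(4)[1], np.eye(4)[2], np.eye(4)[3]
assert np.allclose(qmul(i, j), k) and np.allclose(qmul(j, i), -k)   # ij = k = -ji

rng = np.random.default_rng(0)
a, b = rng.normal(size=4), rng.normal(size=4)
assert np.allclose(qmul(a, qconj(a)), [np.sum(a**2), 0, 0, 0])      # a * conj(a) = |a|^2
assert np.allclose(qconj(qmul(a, b)), qmul(qconj(b), qconj(a)))     # conj(ab) = conj(b) conj(a)
print("quaternion identities verified")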
We identify the space $\mathbb{C}^2$ with the set $\mathbb{H}$ of quaternions: let $(z_1, z_2) = (x_0 + i x_1, x_2 + i x_3)$ be two complex numbers with the imaginary unit $i$, and let $j$ be another imaginary unit such that $j^2 = -1$ and $ij + ji = 0$ hold. In particular, for $a \in \mathbb{C}$, if (by abuse of notation) $\bar{a}$ denotes the complex conjugate of $a$, we have $aj = j\bar{a}$. The set of elements of the form $q = z_1 + z_2 j$, $z_1, z_2 \in \mathbb{C}$, endowed with component-wise addition and with the associative multiplication, is then another way of describing $\mathbb{H}$. The quaternion conjugation gives $\overline{z_1 + z_2 j} := \bar{z}_1 - z_2 j$ and $q\bar{q} = \bar{q}q = |z_1|^2 + |z_2|^2$.

Let $E$ be a bounded subset of $\mathbb{R}^4 \cong \mathbb{C}^2 \cong \mathbb{C} \times \mathbb{C}$ and denote by $BC(E, \mathbb{H})$ the class of $\mathbb{H}$-valued bounded continuous functions on $E$. For $f \in BC(E, \mathbb{H})$ we define the modulus of continuity of $f$ as the non-negative function $w_f(\delta)$, $\delta > 0$, given by
$$w_f(\delta) := \sup_{|x - y| \le \delta} \{ |f(x) - f(y)| : x, y \in E \}.$$
For $0 < \nu \le 1$, if
$$\sup_{0 < \delta \le \operatorname{diam} E} \frac{w_f(\delta)}{\delta^{\nu}} < \infty,$$
then $f$ is a Hölder continuous function with exponent $\nu$ on $E$ (Lipschitz continuous for $\nu = 1$). We will denote by
$$C^{0,\nu}(E, \mathbb{H}) := \Big\{ f \in BC(E, \mathbb{H}) : \sup_{0 < \delta \le \operatorname{diam} E} \frac{w_f(\delta)}{\delta^{\nu}} < \infty \Big\}$$
the collection of Hölder continuous functions on $E$, for $0 < \nu \le 1$.

We say ([6]) that a closed set $E$ in $\mathbb{R}^4$ is an Ahlfors-David regular set (in short, AD-regular) if there exists a constant $c > 0$ such that for all $x \in E$ and $0 < r < \operatorname{diam} E$ there holds
$$c^{-1} r^3 \le \mathcal{H}^3(E \cap B(x, r)) \le c\, r^3,$$
where $B(x, r)$ is the closed ball with center $x$ and radius $r$ and $\mathcal{H}^3$ is the 3-dimensional Hausdorff measure. The AD-regularity condition implies a uniform positive and finite bound on $E$ for the upper and lower density.
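Returning to the complex form $q = z_1 + z_2 j$ introduced at the beginning of this subsection, the identity $q\bar{q} = |z_1|^2 + |z_2|^2$ follows in one line from the commutation rule $aj = j\bar{a}$ and $j^2 = -1$:
$$q\bar{q} = (z_1 + z_2 j)(\bar{z}_1 - z_2 j) = |z_1|^2 - z_1 z_2 j + z_2 z_1 j - z_2 \bar{z}_2 j^2 = |z_1|^2 + |z_2|^2,$$
where we used $z_2 j \bar{z}_1 = z_2 z_1 j$ and $z_2 j z_2 j = z_2 \bar{z}_2 j^2 = -|z_2|^2$, so that the two middle terms cancel.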
Moreover, we notice that the AD-regularity condition produces a very wide class of surfaces that contains the classes of surfaces classically considered in the literature: Liapunov surfaces, smooth surfaces and Lipschitz ones. Finally, we would like to remark that AD-regular sets are not always rectifiable in the sense of Federer [7]. In what follows, $\Omega \subset \mathbb{R}^4$ stands for a bounded domain with an AD-regular rectifiable boundary $\Gamma$, and we let $\Omega^+ := \Omega$, $\Omega^- := \mathbb{R}^4 \setminus \Omega^+$, where both open sets are assumed to be connected.

For continuously real-differentiable $\mathbb{H}$-valued functions $f := f_0 + f_1 i + f_2 j + f_3 k : \Omega \to \mathbb{H}$, the operator
$${}^{\psi}D := \frac{\partial}{\partial x_0} + i \frac{\partial}{\partial x_1} - j \frac{\partial}{\partial x_2} + k \frac{\partial}{\partial x_3},$$
associated to the structural set ($\mathbb{H}$-vector) $\psi := \{1, i, -j, k\}$, is called the Cauchy-Riemann operator; it can be written in complex form as
$${}^{\psi}D = 2\left( \frac{\partial}{\partial \bar{z}_1} - j \frac{\partial}{\partial \bar{z}_2} \right).$$
A factorization of the Laplacian is given by
$${}^{\psi}D\, \overline{{}^{\psi}D} = \overline{{}^{\psi}D}\, {}^{\psi}D = \Delta_{\mathbb{H}},$$
where
$$\overline{{}^{\psi}D} := 2\left( \frac{\partial}{\partial \bar{z}_1} + j \frac{\partial}{\partial \bar{z}_2} \right)$$
and $\Delta_{\mathbb{H}}[f]$ denotes the Laplacian $\Delta_{\mathbb{R}^4}$ applied componentwise to $f_0$, $f_1$, $f_2$, $f_3$. A function $f : \Omega \to \mathbb{H}$ is called left $\psi$-hyperholomorphic in $\Omega$ if ${}^{\psi}D[f](\xi) = 0$ for all $\xi \in \Omega$. We will denote
$${}^{\psi}\mathfrak{M}(\Omega, \mathbb{H}) := \{ f \in C^1(\Omega, \mathbb{H}) : {}^{\psi}D[f](\xi) = 0, \ \forall \xi \in \Omega \}.$$
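A quick way to see the scalar identity behind this factorization (a standard Wirtinger-calculus computation, added here only as a supporting remark) is that, with $(z_1, z_2) = (x_0 + i x_1, x_2 + i x_3)$ as above,
$$\frac{\partial}{\partial z_m}\frac{\partial}{\partial \bar{z}_m} = \frac14\left(\frac{\partial}{\partial x_{2m-2}} - i\frac{\partial}{\partial x_{2m-1}}\right)\left(\frac{\partial}{\partial x_{2m-2}} + i\frac{\partial}{\partial x_{2m-1}}\right) = \frac14\left(\frac{\partial^2}{\partial x_{2m-2}^2} + \frac{\partial^2}{\partial x_{2m-1}^2}\right), \qquad m = 1, 2,$$
so that $\Delta_{\mathbb{R}^4} = 4\big(\partial_{z_1}\partial_{\bar{z}_1} + \partial_{z_2}\partial_{\bar{z}_2}\big)$, which is the componentwise identity underlying ${}^{\psi}D\, \overline{{}^{\psi}D} = \Delta_{\mathbb{H}}$.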
Under the assumption $f \in {}^{\psi}\mathfrak{M}(\Omega, \mathbb{H})$, and following arguments similar to those in [4, page 3875], we have the Cauchy integral formula
$$\int_{\Gamma} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, f(\tau)\, d\mathcal{H}^3_{\tau} = f(t), \qquad t \in \Omega^+. \tag{6.2.1}$$
For a survey of the theory of $\psi$-hyperholomorphic functions along classical lines we refer the reader to [12]. An easy computation shows that if $f = u + vj$ with $u = f_0 + i f_1$ and $v = f_2 + i f_3$, then
$${}^{\psi}D f = 0 \iff \begin{cases} \partial_{\bar{z}_1} u + \partial_{z_2} \bar{v} = 0, \\ \partial_{\bar{z}_2} u - \partial_{z_1} \bar{v} = 0, \end{cases}$$
which expresses the direct relation between $\psi$-hyperholomorphic functions and solutions of the Cimmino system. The most important example of a $\psi$-hyperholomorphic function is the function
$$K_{\psi}(q) = \frac{1}{2\pi^2}\, \frac{\bar{z}_1 + \bar{z}_2 j}{(|z_1|^2 + |z_2|^2)^2}, \qquad (z_1, z_2) \neq (0, 0),$$
which is obtained by applying $\overline{{}^{\psi}D}$ to the fundamental solution of the Laplacian $\Delta_{\mathbb{R}^4}$. It is known as the Cauchy kernel, and it represents a fundamental solution to both operators ${}^{\psi}D$ and $\overline{{}^{\psi}D}$.
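As a small sanity check (an illustration under the identification $(z_1, z_2) = (x_0 + i x_1, x_2 + i x_3)$ used above, not an argument from the text), the four real components of $K_{\psi}$ are, up to the factor $1/(2\pi^2)$, $x_0/|q|^4$, $-x_1/|q|^4$, $x_2/|q|^4$ and $-x_3/|q|^4$, and each of them is harmonic away from the origin, consistent with $K_{\psi}$ being obtained from the fundamental solution of $\Delta_{\mathbb{R}^4}$:

import sympy as sp

x0, x1, x2, x3 = sp.symbols('x0 x1 x2 x3', real=True)
r2 = x0**2 + x1**2 + x2**2 + x3**2          # |q|^2 = |z1|^2 + |z2|^2

def laplacian(f):
    return sum(sp.diff(f, v, 2) for v in (x0, x1, x2, x3))

# components of the Cauchy kernel, up to the constant 1/(2*pi^2)
for comp in (x0, -x1, x2, -x3):
    print(sp.simplify(laplacian(comp / r2**2)))   # prints 0 for each component (away from the origin)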
6.3 Poincaré-Bertrand formula for $\psi$-hyperholomorphic singular integrals

The Cauchy kernel $K_{\psi}$ generates the integrals that are important for us:
$${}^{\psi}C_{\Gamma}[f](q) := \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, f(\xi)\, d\mathcal{H}^3_{\xi}, \qquad q \in \mathbb{R}^4 \setminus \Gamma,$$
$${}^{\psi}S_{\Gamma}[f](q) := 2 \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, \big(f(\xi) - f(q)\big)\, d\mathcal{H}^3_{\xi} + f(q), \qquad q \in \Gamma,$$
where $n_{\psi}(\xi) := n_0 + n_1 i - n_2 j + n_3 k$, with $(n_0, n_1, n_2, n_3) \in \mathbb{R}^4$ being the outward unit normal vector on $\Gamma$. By using ideas from [3], we have

Remark 6.3.1. In general, the integral
$$\int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, d\mathcal{H}^3_{\xi}$$
does not make sense for every $q \in \Gamma$; hence the formula
$$\int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, \big(f(\xi) - f(q)\big)\, d\mathcal{H}^3_{\xi} = \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, f(\xi)\, d\mathcal{H}^3_{\xi} - \left( \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, d\mathcal{H}^3_{\xi} \right) f(q)$$
is generally not valid. In the case when the singular integral
$$2 \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, d\mathcal{H}^3_{\xi}$$
has a finite value $\alpha(q)$ for all $q \in \Gamma$, then
$${}^{\psi}S_{\Gamma}[f](q) = 2 \int_{\Gamma} K_{\psi}(\xi - q)\, n_{\psi}(\xi)\, f(\xi)\, d\mathcal{H}^3_{\xi} + (1 - \alpha(q))\, f(q).$$

While the first is a $\psi$-hyperholomorphic version of the usual Cauchy-type integral, the second represents its singular version, whose integral has to be taken in the sense of the Cauchy principal value. In order to facilitate their usage, we present below some basic properties of the $\psi$-hyperholomorphic singular integrals, thus making our exposition self-contained.

Theorem 6.3.2 ([5]). Let $\Omega$ be a bounded domain in $\mathbb{R}^4$ with AD-regular boundary $\Gamma$, and let $f \in C^{0,\nu}(\Gamma, \mathbb{H})$. Then the following limits exist:
$$\lim_{\Omega^{\pm} \ni q \to \xi \in \Gamma} {}^{\psi}C_{\Gamma}[f](q) =: {}^{\psi}C^{\pm}_{\Gamma}[f](\xi);$$
moreover, the following identities hold for all $\xi \in \Gamma$:
$${}^{\psi}C^{\pm}_{\Gamma}[f](\xi) = \frac{1}{2}\Big[ {}^{\psi}S_{\Gamma}[f](\xi) \pm f(\xi) \Big]. \tag{6.3.1}$$

Theorem 6.3.3 ([5]). If $\Gamma$ is an AD-regular surface, then for $f \in C^{0,\nu}(\Gamma, \mathbb{H})$, $0 < \nu < 1$, we have the following formula:
$${}^{\psi}S^{2}_{\Gamma}[f](\xi) = f(\xi), \qquad \xi \in \Gamma.$$

Lemma 6.3.4. If $\{t, \xi\} \subset \Gamma$, $t \neq \xi$, then
$$\int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, K_{\psi}(\tau - \xi)\, d\mathcal{H}^3_{\tau} = 0.$$
Proof. The proof of Lemma 6.3.4 is similar to the proof of Lemma 3 in [11]; therefore, we refer to [11] for the identical parts.

Lemma 6.3.5. Let $f(\xi, \tau) := \dfrac{f_0(\xi, \tau)}{|\xi - \tau|^{\mu}}$, $0 \le \mu < 3$, with $f_0 \in C^{0,\nu}(\Gamma \times \Gamma, \mathbb{H})$. Then the following formula for changing the order of integration holds for all $t \in \Gamma$:
$$\int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, [f(\xi, \tau) - f(\tau, \tau)]\, d\mathcal{H}^3_{\tau} \int_{\Gamma_{\xi}} n_{\psi}(\xi)\, d\mathcal{H}^3_{\xi} = \int_{\Gamma_{\xi}} \int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, [f(\xi, \tau) - f(\tau, \tau)]\, d\mathcal{H}^3_{\tau}\, n_{\psi}(\xi)\, d\mathcal{H}^3_{\xi}.$$

Proof. The proof of Lemma 6.3.5 is along the same lines as the proof of Theorem 22.5 in [10]. □

The Poincaré-Bertrand formula in the $\psi$-hyperholomorphic framework is established by our next theorem.

Theorem 6.3.6. Let $\Omega$ be a bounded domain in $\mathbb{R}^4$ with AD-regular boundary $\Gamma$ and let $f \in C^{0,\nu}(\Gamma \times \Gamma, \mathbb{H})$. Then for all $t \in \Gamma$,
$$\int_{\Gamma_z} K_{\psi}(z - t)\, n_{\psi}(z)\, d\mathcal{H}^3_{z} \int_{\Gamma_{\xi}} K_{\psi}(\xi - z)\, n_{\psi}(\xi)\, [f(\xi, z) - f(z, t)]\, d\mathcal{H}^3_{\xi} = \int_{\Gamma_{\xi}} \int_{\Gamma_z} K_{\psi}(z - t)\, n_{\psi}(z)\, d\mathcal{H}^3_{z}\, K_{\psi}(\xi - z)\, n_{\psi}(\xi)\, [f(\xi, z) - f(z, t)]\, d\mathcal{H}^3_{\xi} + \alpha^{2}(t)\, f(t, t),$$
where the integrals are understood in the sense of the Cauchy principal value. If $\Omega$ is a bounded domain in $\mathbb{R}^4$ with a smooth boundary $\Gamma$, then $\alpha = \frac{1}{2}$ and the formula reduces to the Poincaré-Bertrand formula
(see, e.g., [11]).

Proof. We have
$$\begin{aligned}
&\int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, d\mathcal{H}^3_{\tau} \int_{\Gamma_{\xi}} K_{\psi}(\xi - \tau)\, n_{\psi}(\xi)\, [f(\xi, \tau) - f(\tau, t)]\, d\mathcal{H}^3_{\xi} \\
&\quad = \int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, d\mathcal{H}^3_{\tau} \int_{\Gamma_{\xi}} K_{\psi}(\xi - \tau)\, n_{\psi}(\xi)\, \big([f(\xi, \tau) - f(\tau, t)] - f(\tau, \tau)\big)\, d\mathcal{H}^3_{\xi} \\
&\qquad + \int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, d\mathcal{H}^3_{\tau} \int_{\Gamma_{\xi}} K_{\psi}(\xi - \tau)\, n_{\psi}(\xi)\, [f(\tau, \tau) - f(t, t)]\, d\mathcal{H}^3_{\xi} \\
&\qquad + \int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, d\mathcal{H}^3_{\tau} \int_{\Gamma_{\xi}} K_{\psi}(\xi - \tau)\, n_{\psi}(\xi)\, f(t, t)\, d\mathcal{H}^3_{\xi}.
\end{aligned}$$
In the first two quaternionic integrals on the right-hand side we can change the order of integration; by Lemma 6.3.5 we have
$$\begin{aligned}
&\int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, d\mathcal{H}^3_{\tau} \int_{\Gamma_{\xi}} K_{\psi}(\xi - \tau)\, n_{\psi}(\xi)\, [f(\xi, \tau) - f(\tau, t)]\, d\mathcal{H}^3_{\xi} \\
&\quad = \int_{\Gamma_{\xi}} \int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, d\mathcal{H}^3_{\tau}\, K_{\psi}(\xi - \tau)\, n_{\psi}(\xi)\, \big([f(\xi, \tau) - f(\tau, t)] - f(\tau, \tau)\big)\, d\mathcal{H}^3_{\xi} \\
&\qquad + \int_{\Gamma_{\xi}} \int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, d\mathcal{H}^3_{\tau}\, K_{\psi}(\xi - \tau)\, n_{\psi}(\xi)\, [f(\tau, \tau) - f(t, t)]\, d\mathcal{H}^3_{\xi} \\
&\qquad + \int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, d\mathcal{H}^3_{\tau} \int_{\Gamma_{\xi}} K_{\psi}(\xi - \tau)\, n_{\psi}(\xi)\, f(t, t)\, d\mathcal{H}^3_{\xi} \\
&\quad = \int_{\Gamma_{\xi}} \int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, d\mathcal{H}^3_{\tau}\, K_{\psi}(\xi - \tau)\, n_{\psi}(\xi)\, [f(\xi, \tau) - f(\tau, t)]\, d\mathcal{H}^3_{\xi} \\
&\qquad - \int_{\Gamma_{\xi}} \left( \int_{\Gamma_{\tau}} K_{\psi}(\tau - t)\, n_{\psi}(\tau)\, d\mathcal{H}^3_{\tau}\, K_{\psi}(\xi - \tau) \right) n_{\psi}(\xi)\, f(t, t)\, d\mathcal{H}^3_{\xi}
\end{aligned}$$
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' POINCAR´E-BERTRAND FORMULA FOR THE CAUCHY-CIMMINO SINGULAR INTEGRALS31 + � Γτ Kψ(τ − t) nψ(τ) dH3 τ � Γξ Kψ(ξ − τ) nψ(ξ) f(t, t) dH3 ξ = (by using Lemma 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='4 and the Remark 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='1) = � Γξ � Γτ Kψ(τ − t) nψ(τ) dH3 τ Kψ(ξ − τ) nψ(ξ) [f(ξ, τ) − f(τ, t)] dH3 ξ + α2(t)f(t, t).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' □ Suppose that f(ξ, τ) = f(ξ) ∈ C0,ν(Γ, H) is ψ-hyperholomorphic extension into Ω, then the composition formula for ψ-hyperholomorphic functions can be written as: Theorem 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='7 (Composition formula) Let Ω be a bounded domain in R4 with AD-regular bound- ary Γ and let f ∈ C0,ν(Γ, H).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' If f(ξ) can be extended ψ-hyperholomorphically into Ω.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Then for all t ∈ Γ, � Γτ Kψ(τ − t) nψ(τ) dH3 τ � Γξ Kψ(ξ − τ) nψ(ξ)[f(ξ) − f(τ)] dH3 ξ = α2(t)f(t).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' (6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='2) Note that if ψ ˜Sf := 2 � Γξ Kψ(ξ − τ) nψ(ξ)[f(ξ) − f(τ)] dH3 ξ, than formula (6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='2) means that ψ ˜S2f = 4α2(t)f(t).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Since f(ξ) can be holomorphic extented into Ω, then by Theorem 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='2 and Remark 6.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='1 ψC+f(z) = f(z), z ∈ Ω.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' By formula (6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='1) and Remark 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='1, we have 2f(z) = ψ ˜Sf(ξ) + 2(1 − α(z)) f(z), moreover ψ ˜S2f = ψ ˜S ψ ˜Sf = ψ ˜S[2αf] = 4α2f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' □ 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='4 Poincar´e-Bertrand formula for the Cauchy-Cimmino sin- gular integrals Using the representation of the quaternionic Cauchy kernel Kψ and the normal vector nψ in the complex form, we have: Kψ(ξ − z) nψ(ξ) = K1(ξ, z) + K2(ξ, z)j, (6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='1) with K1(ξ, z) := 1 2π2 (¯ξ1 − ¯z1)(n0 + in1) + (¯ξ2 − ¯z2)(n2 + in3) (|ξ1 − z1|2 + |ξ2 − z2|2)2 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' (6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='2) 32 CHAPTER 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' COMPLEX AND QUATERNIONIC NEURAL NETWORKS and K2(ξ, z) := 1 2π2 (¯ξ2 − ¯z2)(n0 + in1) − (¯ξ1 − ¯z1)(n2 + in3) (|ξ1 − z1|2 + |ξ2 − z2|2)2 , (6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3) where ξ = ξ1 + ξ2j, z = z1 + z2j.' 
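As an aside, formulas (6.4.2)-(6.4.3) are straightforward to evaluate numerically. The sketch below is not part of the original text: it assumes points of $\mathbb{C}^2$ represented as pairs of Python complex numbers and the unit normal given as a real 4-tuple $(n_0, n_1, n_2, n_3)$; the sample point and normal are purely illustrative.

```python
import numpy as np

def cimmino_kernels(xi1, xi2, z1, z2, n):
    """Evaluate K1(xi, z) and K2(xi, z) of (6.4.2)-(6.4.3) at a single point.

    xi1, xi2, z1, z2 : complex coordinates of xi = xi1 + xi2*j and z = z1 + z2*j
    n                : real 4-tuple (n0, n1, n2, n3), the unit normal at xi
    """
    n0, n1, n2, n3 = n
    a = n0 + 1j * n1                      # n0 + i n1
    b = n2 + 1j * n3                      # n2 + i n3
    denom = 2.0 * np.pi**2 * (abs(xi1 - z1)**2 + abs(xi2 - z2)**2)**2
    k1 = (np.conj(xi1 - z1) * a + np.conj(xi2 - z2) * b) / denom
    k2 = (np.conj(xi2 - z2) * a - np.conj(xi1 - z1) * b) / denom
    return k1, k2

# Hypothetical sample point and normal, for illustration only.
k1, k2 = cimmino_kernels(1.0 + 0.5j, -0.3 + 0.2j, 0.0j, 0.0j, (1.0, 0.0, 0.0, 0.0))
print(k1, k2)
```

Both kernels are singular at $\xi = z$, so any quadrature built on them must treat the diagonal in the Cauchy principal-value sense, exactly as in the integrals that follow.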
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Thus,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ψCΓ[u + vj](z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) = C1[u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' v](z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) + C2[u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' v](z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2)j,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' (z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) /∈ Γ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ψSΓ[u + vj](z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) = S1[u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' v](z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) + S2[u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' v]j,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' (z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) ∈ Γ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' where C1[u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' v](z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) = � Γ [(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)]u(ζ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ζ2) 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3 ξ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='ξ2− − � Γ [(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)]¯v(ζ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ζ2) 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3 ξ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='ξ2,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' C2[u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' v](z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) = � Γ [(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)]v(ζ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ζ2) 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3 ξ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='ξ2+ + � Γ [(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)]¯u(ζ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ζ2) 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3 ξ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='ξ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' The pair (C1, C2) of integrals for (z1, z2) ∈ C2 play the role of an analog of a Cauchy type integral in theory of the Cimmino system of partial differential equations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Similarly the singular Cauchy-Cimmino integral operators are defined formally as pair (S1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' S2),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' of the following singular integrals taken in the sense of Cauchy’s principal value S1[u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' v](z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) = 2 � Γ [(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)][u(ζ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ζ2) − u(z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2)] 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3− −2 � Γ [(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)][¯v(ζ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ζ2) − ¯v(z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2)] 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3+ +u(z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) = = 2 � Γ [(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)]u(ζ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ζ2) 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3− −2 � Γ [(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)]¯v(ζ1,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ζ2) 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3+ +(1 − α(z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2)) u(z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' S2[u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' v](z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2) = 2 � Γ [(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)][v(ζ1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' ζ2) − v(z1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' z2)] 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3+ 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' POINCAR´E-BERTRAND FORMULA FOR THE CAUCHY-CIMMINO SINGULAR INTEGRALS33 +2 � Γ [(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)][¯u(ζ1, ζ2) − ¯u(z1, z2)] 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3+ +v(z1, z2) = = 2 � Γ [(¯ζ1 − ¯z1)(n0 + in1) + (¯ζ2 − ¯z2)(n2 + in3)]v(ζ1, ζ2) 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3+ +2 � Γ [(¯ζ2 − ¯z2)(n0 + in1) − (¯ζ1 − ¯z1)(n2 + in3)]¯u(ζ1, ζ2) 2π2(|ζ1 − z1|2 + |ζ2 − z2|2)2 dH3+ +(1 − α(z1, z2)) v(z1, z2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Returning to Section 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3 substitute (6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='2) and (6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3) into Theorem 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='6,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' we have: let Ω be a bounded domain in R4 with AD-regular boundary Γ and let (u,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' v) ∈ C0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='ν(Γ×Γ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' C)×C0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content='ν(Γ×Γ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' C),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' then for all t ∈ Γ � Γτ � Γξ [K1(τ − t) + K2(τ − t)j] {[K1(ξ − τ) + K2(ξ − τ)j](u(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − u(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t)+ +(v(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − v(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t))j)dH3 ξ dH3 τ � = = � Γτ � Γξ [K1(τ − t) + K2(τ − t)j] {K1(ξ − τ)(u(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − u(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t)) + K1(ξ − τ)(v(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − v(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t))j+ +K2(ξ − τ)j(u(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − u(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t)) + K2(ξ − τ)j(v(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − v(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t))jdH3 ξ dH3 τ � = = � Γτ � Γξ {K1(τ − t) K1(ξ − τ)(u(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − u(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t))+ +K1(τ − t) K1(ξ − τ)(v(ξ,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − v(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t))j+ +K1(τ − t) K2(ξ − τ)j(u(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − u(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t))+ +K1(τ − t) K2(ξ − τ)j(v(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − v(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t))j+ +K2(τ − t) j K1(ξ − τ)(u(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − u(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t))+ +K2(τ − t) j K1(ξ − τ)(v(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − v(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t))j+ +K2(τ − t) j K2(ξ − τ) j(u(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − u(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t))+ + K2(τ − t) j K2(ξ − τ) j (v(ξ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' τ) − v(τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' t)) j} dH3 ξ dH3 τ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' Note that � Γξ � Γτ Kψ(τ − t) nψ(τ) dH3 τ Kψ(ξ − τ) nψ(ξ)[f(ξ, τ) − f(τ, t)] dH3 ξ + α2(t)f(t, t) = = � Γξ � Γτ {K1(τ − t) K1(ξ − τ)(u(ξ, τ) − u(τ, t))+ +K1(τ − t) K1(ξ − τ)(v(ξ, τ) − v(τ, t))j+ 34 CHAPTER 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAyT4oBgHgl3EQfOfaf/content/2301.00007v1.pdf'} +page_content=' COMPLEX AND QUATERNIONIC NEURAL NETWORKS +K1(τ − t) K2(ξ − τ)j(u(ξ, τ) − u(τ, t))+ +K1(τ − t) K2(ξ − τ)j(v(ξ, τ) − v(τ, t))j+ +K2(τ − t) j K1(ξ − τ)(u(ξ, τ) − u(τ, t))+ +K2(τ − t) j K1(ξ − τ)(v(ξ, τ) − v(τ, t))j+ +K2(τ − t) j K2(ξ − τ) j(u(ξ, τ) − u(τ, t))+ + K2(τ − t) j K2(ξ − τ) j (v(ξ, τ) − v(τ, t)) j} dH3 τ dH3 ξ+ +α2(t)(u(t, t) + v(t, t)j).' 
If one separates the complex coordinates in the above equality, the following formulae for the Cimmino system are easily obtained:
$$\int_{\Gamma_\tau}\int_{\Gamma_\xi} \bigl[K_1(\tau-t)K_1(\xi-\tau)(u(\xi,\tau)-u(\tau,t)) + K_1(\tau-t)K_2(\xi-\tau)j(v(\xi,\tau)-v(\tau,t))j$$
$$+ K_2(\tau-t)jK_1(\xi-\tau)(v(\xi,\tau)-v(\tau,t))j + K_2(\tau-t)jK_2(\xi-\tau)j(u(\xi,\tau)-u(\tau,t))\bigr]\,dH^3_\xi\,dH^3_\tau$$
$$= \int_{\Gamma_\xi}\int_{\Gamma_\tau} \bigl[K_1(\tau-t)K_1(\xi-\tau)(u(\xi,\tau)-u(\tau,t)) + K_1(\tau-t)K_2(\xi-\tau)j(v(\xi,\tau)-v(\tau,t))j$$
$$+ K_2(\tau-t)jK_1(\xi-\tau)(v(\xi,\tau)-v(\tau,t))j + K_2(\tau-t)jK_2(\xi-\tau)j(u(\xi,\tau)-u(\tau,t))\bigr]\,dH^3_\tau\,dH^3_\xi + \alpha^2(t)\,u(t,t);$$
and
$$\int_{\Gamma_\tau}\int_{\Gamma_\xi} \bigl[K_1(\tau-t)K_1(\xi-\tau)(v(\xi,\tau)-v(\tau,t))j + K_1(\tau-t)K_2(\xi-\tau)j(u(\xi,\tau)-u(\tau,t))$$
$$+ K_2(\tau-t)jK_1(\xi-\tau)(u(\xi,\tau)-u(\tau,t)) + K_2(\tau-t)jK_2(\xi-\tau)j(v(\xi,\tau)-v(\tau,t))j\bigr]\,dH^3_\xi\,dH^3_\tau$$
$$= \int_{\Gamma_\xi}\int_{\Gamma_\tau} \bigl[K_1(\tau-t)K_1(\xi-\tau)(v(\xi,\tau)-v(\tau,t))j + K_1(\tau-t)K_2(\xi-\tau)j(u(\xi,\tau)-u(\tau,t))$$
$$+ K_2(\tau-t)jK_1(\xi-\tau)(u(\xi,\tau)-u(\tau,t)) + K_2(\tau-t)jK_2(\xi-\tau)j(v(\xi,\tau)-v(\tau,t))j\bigr]\,dH^3_\tau\,dH^3_\xi + \alpha^2(t)\,v(t,t)\,j.$$
Here we have
$$\int_{\Gamma_\tau}\int_{\Gamma_\xi} \bigl[K_1(\tau-t)K_1(\xi-\tau)(u(\xi,\tau)-u(\tau,t)) - K_1(\tau-t)K_2(\xi-\tau)(v(\xi,\tau)-v(\tau,t))$$
$$- K_2(\tau-t)K_1(\xi-\tau)(v(\xi,\tau)-v(\tau,t)) - K_2(\tau-t)K_2(\xi-\tau)(u(\xi,\tau)-u(\tau,t))\bigr]\,dH^3_\xi\,dH^3_\tau \tag{6.4.4}$$
$$= \int_{\Gamma_\xi}\int_{\Gamma_\tau} \bigl[K_1(\tau-t)K_1(\xi-\tau)(u(\xi,\tau)-u(\tau,t)) - K_1(\tau-t)K_2(\xi-\tau)(v(\xi,\tau)-v(\tau,t))$$
$$- K_2(\tau-t)K_1(\xi-\tau)(v(\xi,\tau)-v(\tau,t)) - K_2(\tau-t)K_2(\xi-\tau)(u(\xi,\tau)-u(\tau,t))\bigr]\,dH^3_\tau\,dH^3_\xi + \alpha^2(t)\,u(t,t);$$
and
$$\int_{\Gamma_\tau}\int_{\Gamma_\xi} \bigl[K_1(\tau-t)K_1(\xi-\tau)(v(\xi,\tau)-v(\tau,t)) + K_1(\tau-t)K_2(\xi-\tau)(u(\xi,\tau)-u(\tau,t))$$
$$+ K_2(\tau-t)K_1(\xi-\tau)(u(\xi,\tau)-u(\tau,t)) - K_2(\tau-t)K_2(\xi-\tau)(v(\xi,\tau)-v(\tau,t))\bigr]\,dH^3_\xi\,dH^3_\tau \tag{6.4.5}$$
$$= \int_{\Gamma_\xi}\int_{\Gamma_\tau} \bigl[K_1(\tau-t)K_1(\xi-\tau)(v(\xi,\tau)-v(\tau,t)) + K_1(\tau-t)K_2(\xi-\tau)(u(\xi,\tau)-u(\tau,t))$$
$$+ K_2(\tau-t)K_1(\xi-\tau)(u(\xi,\tau)-u(\tau,t)) - K_2(\tau-t)K_2(\xi-\tau)(v(\xi,\tau)-v(\tau,t))\bigr]\,dH^3_\tau\,dH^3_\xi + \alpha^2(t)\,v(t,t).$$
Let
$$N_1[f](z) := 2\int_{\Gamma} K_1(\xi-z)\,f(\xi)\,dH^3_\xi + (1-\alpha(z))\,f(z), \qquad \forall z \in \Gamma,$$
and
$$N_2[f](z) := -2\int_{\Gamma} K_2(\xi-z)\,f(\xi)\,dH^3_\xi + (1-\alpha(z))\,f(z), \qquad \forall z \in \Gamma.$$
If one separates the complex coordinates in Lemma 6.3.4, we have
$$\int_{\Gamma_\xi}\int_{\Gamma_\tau} K_1(\tau-z)K_1(\xi-\tau)\,u(\xi)\,dH^3_\tau\,dH^3_\xi - \int_{\Gamma_\xi}\int_{\Gamma_\tau} K_1(\tau-z)K_2(\xi-\tau)\,v(\xi)\,dH^3_\tau\,dH^3_\xi$$
$$- \int_{\Gamma_\xi}\int_{\Gamma_\tau} K_2(\tau-z)K_1(\xi-\tau)\,v(\xi)\,dH^3_\tau\,dH^3_\xi - \int_{\Gamma_\xi}\int_{\Gamma_\tau} K_2(\tau-z)K_2(\xi-\tau)\,u(\xi)\,dH^3_\tau\,dH^3_\xi = 0,$$
$$\int_{\Gamma_\xi}\int_{\Gamma_\tau} K_1(\tau-z)K_1(\xi-\tau)\,v(\xi)\,dH^3_\tau\,dH^3_\xi + \int_{\Gamma_\xi}\int_{\Gamma_\tau} K_1(\tau-z)K_2(\xi-\tau)\,u(\xi)\,dH^3_\tau\,dH^3_\xi$$
$$+ \int_{\Gamma_\xi}\int_{\Gamma_\tau} K_2(\tau-z)K_1(\xi-\tau)\,u(\xi)\,dH^3_\tau\,dH^3_\xi - \int_{\Gamma_\xi}\int_{\Gamma_\tau} K_2(\tau-z)K_2(\xi-\tau)\,v(\xi)\,dH^3_\tau\,dH^3_\xi = 0.$$
Thus, if the functions $u$ and $v$ depend only on $\xi$, we can write
$$N_1^2 - N_2^2 = I, \tag{6.4.6}$$
$$N_1 N_2 + N_2 N_1 = 0. \tag{6.4.7}$$

Remark 6.4.1. Note that $N_2^2 \neq 0$ in (6.4.6). Indeed, suppose that $N_2^2[f] = 0$ for all $f \in C^{1,\nu}(\Gamma,\mathbb{C})$. Then the function $N_2[f]$ can be holomorphically extended from $\Gamma$ into $\Omega^+$, and by the uniqueness theorem for harmonic functions this extension is given by
$$F(z) = -2\int_{\Gamma} K_2(\xi-z)\,f(\xi)\,dH^3_\xi, \qquad z \in \Omega^+.$$
But then ${}^{\psi}C[f]$ and (6.4.1) would imply that the function
$$Gf(z) := \int_{\Gamma} K_1(\xi-z)\,f(\xi)\,dH^3_\xi$$
is holomorphic for every $f \in C^{1,\nu}(\Gamma,\mathbb{C})$, which is not true.

Theorem 6.4.2. Let $f \in C^1(\Omega^+,\mathbb{C})$ be representable in $\Omega^+ \subset \mathbb{C}^2$ by
$$f(z) = \int_{\Gamma} K_1(\xi-z)\,f(\xi)\,dH^3_\xi, \qquad z \in \Omega^+.$$
Then $f$ is holomorphic in $\Omega^+$.

Proof.
Applying the Sokhotski-Plemelj formulae to $f$, we have
$$f(z) = \tfrac12\bigl[N_1[f](z) + f(z)\bigr], \qquad z \in \Gamma.$$
Thus $N_1^2[f] = I[f]$, and from (6.4.6) we obtain $N_2^2[f] = 0$. But then, by Remark 6.4.1, the function $F$ defined by
$$F(z) := \int_{\Gamma} K_1(\xi-z)\,f(\xi)\,dH^3_\xi$$
is holomorphic in $\Omega^+$, and with $F|_{\Omega^+} = u$ the proof is complete. $\square$

From Lemma 6.3.4 and [10, page 211], the term
$$\int_{\Gamma_\xi}\int_{\Gamma_\tau} K_1(\tau-t)\,K_1(\xi-\tau)\,dH^3_\tau\,dH^3_\xi = 0 \qquad \forall t \in \Gamma.$$
Then, from Section 6.4, we have that
$$\int_{\Gamma_\xi}\int_{\Gamma_\tau} K_2(\tau-t)\,K_2(\xi-\tau)\,dH^3_\tau\,dH^3_\xi = 0. \tag{6.4.8}$$
So, by using (6.4.8) and Theorem 6.3.6, for $t \in \Gamma$ and $f \in C^{0,\nu}(\Gamma\times\Gamma,\mathbb{C})$ we have
$$\int_{\Gamma_\tau}\int_{\Gamma_\xi} K_2(\tau-t)\,K_2(\xi-\tau)\,[f(\xi,\tau)-f(\tau,t)]\,dH^3_\tau\,dH^3_\xi = \int_{\Gamma_\xi}\int_{\Gamma_\tau} K_2(\tau-t)\,K_2(\xi-\tau)\,[f(\xi,\tau)-f(\tau,t)]\,dH^3_\xi\,dH^3_\tau. \tag{6.4.9}$$
Comparing the last equality with (6.4.4) and (6.4.5), we conclude that for $f \in C^{0,\nu}(\Gamma\times\Gamma,\mathbb{C})$ the structural analogue of the Poincaré-Bertrand formula holds for the singular integrals of the Cimmino system:
$$\int_{\Gamma_\tau}\int_{\Gamma_\xi} K_1(\tau-t)\,K_1(\xi-\tau)\,[f(\xi,\tau)-f(\tau,t)]\,dH^3_\xi\,dH^3_\tau = \int_{\Gamma_\xi}\int_{\Gamma_\tau} K_1(\tau-t)\,K_1(\xi-\tau)\,[f(\xi,\tau)-f(\tau,t)]\,dH^3_\tau\,dH^3_\xi + \alpha^2(t)\,f(t,t).$$

Acknowledgement. This article has been supported by the Polish National Agency for Strategic Partnership under Grant No. BPI/PST/2021/1/00031/U/00001.

Bibliography

[1] Hirose, A. Complex-Valued Neural Networks: Advances and Applications. John Wiley & Sons Inc., 2013, 304 pp.
[2] Isokawa, T., Kusakabe, T., Matsui, N., Peper, F. Quaternion Neural Network and Its Application. In: Palade, V., Howlett, R. J., Jain, L. (eds) Knowledge-Based Intelligent Information and Engineering Systems. KES 2003. Lecture Notes in Computer Science, vol. 2774. Springer, Berlin, Heidelberg, 2003.
[3] R. Abreu Blaya, J. Bory Reyes and B. Kats (2015): Cauchy integral and singular integral operator over closed Jordan curves. Monatsh. Math. 176: 1-15.
[4] R. Abreu Blaya, J. Bory Reyes, A. Guzmán Adán and B. Schneider (2012): Boundary value problems for the Cimmino system via quaternionic analysis. Appl. Math. Comput., 219, 3872-3881.
[5] R. Abreu Blaya, J. Bory Reyes and B. Schneider (2014): On Cauchy type integrals related to the Cimmino system of partial differential equations. Operator Theory, Operator Algebras and Applications, 81-92, Oper. Theory Adv. Appl., 242, Birkhäuser/Springer, Basel.
[6] G. David and S. Semmes (1993): Analysis of and on Uniformly Rectifiable Sets. Mathematical Surveys and Monographs 38, AMS, Providence, R.I.
[7] H. Federer (1969): Geometric Measure Theory. Grundlehren Math. Wiss. 153, Springer, New York.
[8] K. Gürlebeck and W. Sprössig (1997): Quaternionic and Clifford Calculus for Physicists and Engineers. John Wiley & Sons, England, 371 pp.
[9] V. Kravchenko and M. Shapiro (1996): Integral Representations for Spatial Models of Mathematical Physics. Pitman Res. Notes in Math. Ser., vol. 351, Longman, Harlow.
[10] A. M. Kytmanov (1995): The Bochner-Martinelli Integral and Its Applications. Birkhäuser.
[11] I. Mitelman, M. Shapiro (1994): Formulae of changing of integration order and of inversion for some multidimensional singular integrals and hypercomplex analysis. J. Nat. Geom. 5 (1), 11-27.
[12] M. Shapiro (1995): Some remarks on generalizations of the one-dimensional complex analysis: Hypercomplex approach. Functional Analytic Methods in Complex Analysis and Applications to Partial Differential Equations (Trieste, 1993). World Scientific Publ., River Edge, N.J., 379-401.

Chapter 7  Fuzzy neural networks
I. Perfiljeva, V. Novák

In the field of artificial intelligence, neuro-fuzzy refers to the combination of artificial neural networks and fuzzy logic [4]. A neuro-fuzzy system is commonly known in the literature as a fuzzy neural network (FNN) or a neuro-fuzzy system (NFS). A neuro-fuzzy system (the term used hereafter) incorporates the human reasoning style of fuzzy systems through the use of fuzzy sets and a linguistic model consisting of a set of fuzzy IF-THEN rules. The main strength of neuro-fuzzy systems is that they are universal approximators whose behaviour can be interpreted through fuzzy IF-THEN rules [2, 3, 4].

The main specificity of neuro-fuzzy systems is the presence of two conflicting requirements in fuzzy modeling: interpretability and accuracy. In practice, one of the two requirements prevails. As a consequence, research on neuro-fuzzy systems is divided into two areas: linguistic fuzzy modeling focused on interpretability, mainly the Mamdani model, and accuracy-oriented fuzzy modeling, mainly the Takagi-Sugeno-Kang (TSK) model.

A new line of research in the field of data-stream mining considers the case when neuro-fuzzy systems are continually updated with new incoming data. The system's response lies in its dynamic updates, which include not only recursive adaptation of model parameters but also dynamic evolution and pruning of model components, so as to handle concept drift adequately and keep the model "relevant" at all times. Detailed reviews of various approaches to the development of neuro-fuzzy systems can be found in [2] and [3].
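To make the accuracy-oriented (TSK) branch concrete, the following is a minimal, illustrative sketch, not taken from the text, of a zero-order Takagi-Sugeno-Kang inference step with Gaussian membership functions; the rule base, the membership parameters and all values are hypothetical.

```python
import numpy as np

def gaussian_mf(x, center, sigma):
    """Gaussian membership degree of a crisp input x."""
    return np.exp(-0.5 * ((x - center) / sigma) ** 2)

def tsk_infer(x, rules):
    """Zero-order TSK inference: weighted average of crisp rule consequents.

    Each rule is (centers, sigmas, consequent); its firing strength is the
    product of the membership degrees over all input dimensions.
    """
    strengths = np.array([
        np.prod([gaussian_mf(xi, c, s) for xi, c, s in zip(x, centers, sigmas)])
        for centers, sigmas, _ in rules
    ])
    consequents = np.array([r[2] for r in rules])
    return float(np.dot(strengths, consequents) / (strengths.sum() + 1e-12))

# Two hypothetical rules over a two-dimensional input.
rules = [
    (np.array([0.0, 0.0]), np.array([1.0, 1.0]), 0.2),  # IF x1 is A1 AND x2 is B1 THEN y = 0.2
    (np.array([1.0, 1.0]), np.array([1.0, 1.0]), 0.8),  # IF x1 is A2 AND x2 is B2 THEN y = 0.8
]
print(tsk_infer(np.array([0.5, 0.5]), rules))
```

In a Mamdani-style system the consequents would themselves be fuzzy sets requiring defuzzification, which is precisely what makes that family more interpretable but typically less accurate than the TSK form sketched here.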
A neuro-fuzzy system is represented as a special three-layer feedforward neural network (ANN), where [4]:
- the first layer corresponds to the input variables,
- the second layer symbolizes the fuzzy rules,
- the third layer represents the output variables,
- the fuzzy sets are encoded as (fuzzy) connection weights.

The learning procedure is constrained so as to preserve the semantic properties of the underlying fuzzy system.

Both characteristics, interpretability and accuracy, become relevant once an NFS has already been successfully developed for a specific problem. This problem-oriented perspective exposes the limitations of NFSs modeled by artificial neural networks (ANNs) and raises the question: can NFSs be extended to the next generation of convolutional neural networks (CNNs)? Below we consider the main problems specific to neural-network computing technology.

The main problems solved with the help of neural networks are classification and regression; other problems are their modifications. For example, semantic/instance segmentation is based on pixel-wise classification, object detection is a regression over rectangular/polygonal regions, time-series prediction is a regression, and so on. Let us discuss and compare the capabilities of ANNs and CNNs in solving these problems [6, 9].

Both neural networks, as computational models, have a similar architecture with a common step: feature extraction. The main difference is how they transform the input. From the CNN point of view, feature extraction is a gradual process focused on data with spatial dependencies.
The convolution shifts its window over the data, which leads to invariance under data translation. Convolutions gradually extract many complex and abstract features, and the result of this stage is a vector of descriptive features. On the other hand, ANN feature extraction can be interpreted as a transformation of the input space into a space more suitable for the given task, i.e., one that makes the data samples separable. Consequently, an ANN is typically used for data without spatial dependencies, such as tabular data.

The difference between ANN and CNN also appears in the models of their computational units, the neurons. The output a of an ANN neuron is given as

a = g(b + Σ_i w_i x_i) = g(b + w·x),

while the output a_ij of a convolutional neuron is given as

a_ij = g(b + Σ_{m=1}^{l} Σ_{n=1}^{l} W_{m,n} x_{i+m,j+n}),

where W is an l x l convolutional kernel and g is the activation function.

CNNs are currently the state-of-the-art models in all major computer vision tasks, from image classification and object detection to instance segmentation [17, 8, 9]. CNNs combine three architectural ideas: local receptive fields to extract elementary features from images; shared weights to extract the same set of elementary features from the entire input image and to lower computational costs; and local averaging and sub-sampling to reduce the resolution of feature maps. Typically, CNNs are built as a sequence of convolutional layers and pooling layers that automatically learn higher and higher level features [5, 9]. At the end of the sequence, one or more fully connected layers are used to map the output feature map to the scores.
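The two neuron models can be compared side by side in a few lines of NumPy. This is an illustrative sketch, not code from the text; the ReLU activation, input sizes, and kernel size l = 3 are arbitrary assumptions.

```python
import numpy as np

def g(z):
    """Activation function; ReLU is used here as an example."""
    return np.maximum(z, 0.0)

# Fully connected (ANN) neuron: a = g(b + w . x)
x = np.random.rand(16)          # flat feature vector (e.g., tabular data)
w = np.random.randn(16)
b = 0.1
a_dense = g(b + w @ x)

# Convolutional neuron: a_ij = g(b + sum_{m,n} W[m, n] * X[i+m, j+n])
X = np.random.rand(8, 8)        # 2-D input with spatial structure
W = np.random.randn(3, 3)       # shared convolutional kernel (l = 3)
i, j = 2, 4                     # one output position
patch = X[i:i + 3, j:j + 3]
a_conv = g(b + np.sum(W * patch))

print(a_dense, a_conv)
```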
This structure entails complex internal relationships, which are difficult to explain using the Mamdani or Takagi-Sugeno type fuzzy models discussed above. Fortunately, the path to explainability for CNNs is easier than for other types of NN models, since human cognitive abilities contribute to the understanding of visual data. If we agree that the interpretability of a model is something that comes from the design of the model itself, then, following [1], an explainable AI is one that offers reasonable data-processing details that make its operation clear or easy to understand.

With this observation in mind, we single out one particular fuzzy modeling technique, known as fuzzy (F-)transforms, as a technique whose computational model is similar to the CNN model [14]. It has been proven in many papers [10]-[14] that the higher degree F-transforms are universal approximators of smooth and discrete functions. The approximation on the whole domain is a combination of locally best approximations called F-transform components. They are represented by higher degree polynomials and parametrized by coefficients that correspond to average values of local and nonlocal derivatives of various degrees. If the F-transform is applied to images, then its parameters are used in regularization, edge detection, characterization of patches [15], [7], etc. Their computation can be performed by discrete convolutions with kernels that, up to the second degree, are similar to those widely used in image processing, namely the Gaussian, Sobel, and Laplacian kernels [16]. Thus, we can draw an analogy with the CNN method of computation and call the parameters of the higher degree F-transform features. Moreover, based on a clear understanding of these features' semantic meaning, we say that a CNN with the F-transform kernels extracts features with a clear interpretation.
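As a rough illustration of the kind of kernels mentioned above, the sketch below convolves an image with classical 3x3 Gaussian, Sobel, and Laplacian masks using SciPy. The actual higher-degree F-transform kernels are defined in [10]-[14] and are only similar to these; the image here is random data standing in for a real input.

```python
import numpy as np
from scipy.signal import convolve2d

# Classical image-processing kernels similar (up to degree two) to F-transform kernels.
gaussian = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 16.0   # local average
sobel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])        # first derivative
laplacian = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])        # second derivative

image = np.random.rand(32, 32)   # stand-in for a grayscale image
features = [convolve2d(image, k, mode='same', boundary='symm')
            for k in (gaussian, sobel_x, laplacian)]
# Each feature map plays the role of one interpretable "feature channel":
# local average, average first derivative, average second derivative.
print([f.shape for f in features])
```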
In addition, the sequential application of F-transform kernels of degree up to two gives average (nonlocal) derivatives of higher and higher degrees.

The following text details the neural network design supported by the theoretically proven F-transform methodology. The LeNet-5 architecture was chosen as the prototype, and a new CNN called FTNet was assembled with kernels taken from the theory of higher degree F-transforms. The performance of FTNet was examined on several datasets, and on them it converges faster in terms of accuracy/loss than the baseline network, given the same number of steps. We compared the F-transform kernels in the first layer before and after training and observed that the kernels remain unchanged. Moreover, their shapes are similar to the shapes of kernel groups extracted from the best-known CNNs: VGG16 [8], VGG19 [8], InceptionV3 [9], MobileNet [14], ResNet [14], and AlexNet [17], taken as representative examples of CNNs.

Acknowledgement. This article has been supported by the Polish National Agency for Strategic Partnership under Grant No. BPI/PST/2021/1/00031/U/00001.

Bibliography

[1] A. B. Arrieta, N. Díaz-Rodríguez, J. Del Ser, A. Bennetot, S. Tabik, A. Barbado, S. García, S.
Gil-López, D. Molina, R. Benjamins, R. Chatila, F. Herrera, Explainable Artificial Intelligence (XAI): Concepts, taxonomies, opportunities and challenges toward responsible AI, Information Fusion, 58 (2020) 82-115.
[2] B. Kosko, Neural Networks and Fuzzy Systems: A Dynamical Systems Approach to Machine Intelligence, Prentice Hall, Englewood Cliffs, NJ, 1992. ISBN 0-13-611435-0.
[3] C.-T. Lin, C. S. G. Lee, Neural Fuzzy Systems: A Neuro-Fuzzy Synergism to Intelligent Systems, Prentice Hall, Upper Saddle River, NJ, 1996.
[4] F. Klawonn, R. Kruse, D. Nauck, C. Borgelt,
Neuro-Fuzzy-Systeme, Vieweg, Wiesbaden, 2003.
[5] E. A. Popko, I. A. Weinstein, Fuzzy logic module of convolutional neural network for handwritten digits recognition, Journal of Physics: Conference Series, 738 (2016) 012123.
[6] O. Yazdanbakhsh, S. Dick, A deep neuro-fuzzy network for image classification, arXiv preprint arXiv:2001.01686, 2019.
[7] X. Gastaldi, Shake-shake regularization, arXiv preprint arXiv:1705.07485, 2017.
[8] J. M. Ogden, E. H. Adelson, J. R. Bergen, P. J. Burt, Pyramid-based computer graphics, RCA Engineer, 30 (1985) 4-15.
[9] K. Simonyan, A. Zisserman, Very deep convolutional networks for large-scale image recognition, arXiv preprint arXiv:1409.1556, 2014.
[10] I. Perfilieva, Fuzzy transforms: Theory and applications, Fuzzy Sets and Systems, 157/8 (2006) 993-1023.
[11] I. Perfilieva, M. Daňková, B. Bede, Towards a higher degree F-transform, Fuzzy Sets and Systems, 180 (2011) 3-19. ISSN 1063-6706.
[12] I. Perfilieva, M. Holcapek, V. Kreinovich, A new reconstruction from the F-transform components, Fuzzy Sets and Systems, 288 (2016) 3-25.
[13] P. Hurtik, V. Molek, I.
Perfilieva, Novel dimensionality reduction approach for unsupervised learning on small datasets, Pattern Recognition, 103 (2020) 107291.
[14] V. Molek, I. Perfilieva, Deep Learning and Higher Degree F-Transforms: Interpretable Kernels Before and After Learning, International Journal of Computational Intelligence Systems, 13/1 (2020) 1404-1414.
[15] I. Perfilieva, P. Vlašánek, Total variation with nonlocal FT-Laplacian for patch-based inpainting, Soft Computing, 23 (2019) 1833-1841.
[16] G. Patane, Data-Driven Fuzzy Transform, IEEE Transactions on Fuzzy Systems, 30/9 (2022) 3774-3784.
[17] K. K. Pal, K. S. Sudeep, Preprocessing for image classification by convolutional neural networks, in: IEEE International Conference on Recent Trends in Electronics, Information & Communication Technology (RTEICT), IEEE, Bangalore, India, 2016, pp. 1778-1781.

Chapter 8
Implementation of neural networks - what has been done and what can be done?

R. A.
Kycia, P. Artiemjew

8.1 Introduction

The unprecedented abundance of Artificial Neural Network (ANN) applications observed in today's world has two main causes. The first is the growth of computing power resulting from hardware progress, including tensor units. The second is software related: the appearance of easy-to-use, high-level libraries that allow various ANN architectures to be implemented quickly. These two factors make it easy to construct even very advanced structures and transform the discipline from research into engineering.

There are many ANN architectures, depending on the connection structure; a summary of various approaches is presented in Chapter 2. In real-life applications, the most common architecture is the feed-forward ANN, which consists of layers in which the output of one layer serves as the input to the next. Each layer has its specific properties. For implementation, we focus mainly on Python, since it is a leading language used in Machine Learning and its applications.

The simplest "layer" is the perceptron of McCulloch and Pitts [1]. It divides the data space by a hyperplane (of dimension one less than the space) into two disjoint classes, corresponding to the two sides of the hyperplane. Therefore, only data that are hyperplane-separable can be distinguished. The truth table of the XOR logic gate is not hyperplane (linearly) separable, and this observation contributed to the first AI winter.
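A quick way to see the linear-separability limitation is to train the Scikit-learn Perceptron (cf. reference [2] below) on the AND and XOR truth tables. This is a minimal sketch, not code from the chapter, and the hyperparameters are arbitrary.

```python
import numpy as np
from sklearn.linear_model import Perceptron

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_and = np.array([0, 0, 0, 1])   # linearly separable
y_xor = np.array([0, 1, 1, 0])   # not linearly separable

for name, y in [("AND", y_and), ("XOR", y_xor)]:
    clf = Perceptron(max_iter=1000, tol=None, random_state=0).fit(X, y)
    print(name, "training accuracy:", clf.score(X, y))
# Expected: AND is learned perfectly, while the accuracy on XOR stays below 1.0.
```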
The perceptron algorithm is easy to implement from scratch, and it is presented in many sources, e.g., Chapter 2 of [14]. An easy-to-use implementation is also available as the Perceptron model in the Scikit-learn library [2]. Currently there is also an abundance of sources describing how to construct an ANN from scratch [9]. However, to optimize computation, a more advanced approach that uses dedicated libraries is usually needed.

8.2 TensorFlow and PyTorch

The core of today's ANN applications uses two open-source libraries (see footnote 1):
- TensorFlow [3], released by Google. The current version 2.0 appeared in 2019. This library is used with the high-level API (Application Programming Interface) provided by Keras [4].
- PyTorch [5], released by Meta.

Both of them have similar capabilities and philosophies of work, although the architectures of these frameworks are slightly different, mainly because of the different algorithms used in various functionalities. Both are based on multidimensional arrays, called tensors (see footnote 2). The tensors of these two libraries can use CPU and GPU computing capabilities, including cooperation with CUDA [6], which greatly increases the speed of computations.
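To illustrate the similar capabilities and philosophies of the two frameworks, here is the same small feed-forward network written once with Keras and once with PyTorch. This is a minimal sketch assuming both libraries are installed; the 8-input, one-output architecture is an arbitrary choice.

```python
import tensorflow as tf
import torch

# Keras (TensorFlow) version: a two-layer feed-forward network.
keras_model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(8,)),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
keras_model.compile(optimizer="adam", loss="binary_crossentropy")

# PyTorch version of the same architecture.
torch_model = torch.nn.Sequential(
    torch.nn.Linear(8, 16),
    torch.nn.ReLU(),
    torch.nn.Linear(16, 1),
    torch.nn.Sigmoid(),
)

# Both models have the same number of trainable parameters.
print(keras_model.count_params(),
      sum(p.numel() for p in torch_model.parameters()))
```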
Tensors are structures that can efficiently carry the layers of an ANN, each layer being a table of weights for its neurons. Moreover, for the backpropagation algorithm discussed in the previous chapters, both libraries provide automatic differentiation modules (GradientTape in TensorFlow and autograd in PyTorch). These libraries also contain various optimizers for backpropagation, as well as numerous additional features for preprocessing, postprocessing, and constructing the whole ANN architecture. The literature on the practical use of both libraries is vast; we therefore point to two introductory-level books, [14] for TensorFlow and [15] for PyTorch. Both libraries allow custom architectures to be defined that employ the framework functionality, e.g., automatic differentiation or GPU capabilities.

8.3 Non-standard architectures

From the variety of possibilities, we focus on two examples that employ the above libraries to implement non-standard ANNs. The first uses the PyTorch library to implement the Hopfield layer for a feed-forward network, see [10, 11]. The Hopfield layer is quite universal and incorporates a memory layer that enhances the attention mechanism. The second implementation we chose is an architecture based on four-dimensional hypercomplex algebras, provided in [12] using TensorFlow [13]. Colors are effectively encoded using various four-dimensional hypercomplex numbers, and the approach shows exceptionally high accuracy in image classification.
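As an illustration of the automatic-differentiation modules mentioned above (GradientTape in TensorFlow, autograd in PyTorch), the following sketch computes the same derivative in both frameworks. The function being differentiated is an arbitrary toy example, not taken from the chapter.

```python
import tensorflow as tf
import torch

# d/dw of (3w - 1)^2 at w = 2 is 6 * (3*2 - 1) = 30.

# TensorFlow: record operations on a GradientTape.
w_tf = tf.Variable(2.0)
with tf.GradientTape() as tape:
    loss_tf = (w_tf * 3.0 - 1.0) ** 2
print(tape.gradient(loss_tf, w_tf).numpy())   # 30.0

# PyTorch: autograd tracks tensors created with requires_grad=True.
w_pt = torch.tensor(2.0, requires_grad=True)
loss_pt = (w_pt * 3.0 - 1.0) ** 2
loss_pt.backward()
print(w_pt.grad.item())                       # 30.0
```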
Footnote 1: One library that, in a sense, was a predecessor and origin of the modern libraries is Theano, developed at the University of Montreal [7, 8]. Currently it is not popular outside of research applications.

Footnote 2: From the mathematical viewpoint, tensors are multilinear algebraic objects with specific laws of transformation between different bases. This means that they are abstract entities independent of the choice of coordinate frame; under a particular choice of frame, they take the form of multidimensional arrays. In computer science, the notion of a tensor is usually identified with a multidimensional array, and no specific transformation law is implied.

8.4 Conclusions

Artificial Neural Networks are a fast-developing research and engineering area. The current implementation standards revolve around two main libraries: TensorFlow with Keras, and PyTorch. Many other architectures, including research ones, are usually built as extensions of these two frameworks. It is very difficult to predict the direction of development of this discipline, including new architectures. However, a general principle for selecting the ANN architecture best suited to analysing the specific data under consideration is still missing. This is the ultimate goal of the discipline; a deeper understanding of ANNs at the mathematical level is needed first.
Acknowledgments. This article has been supported by the Polish National Agency for Strategic Partnership under Grant No. BPI/PST/2021/1/00031/U/00001.

Bibliography

[1] W. S. McCulloch, W. Pitts, A logical calculus of the ideas immanent in nervous activity, The Bulletin of Mathematical Biophysics, 5(4) (1943) 115-133.
[2] sklearn.linear_model.Perceptron, https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html, Accessed: 16.12.2022.
[3] TensorFlow, https://www.tensorflow.org/, Accessed: 16.12.2022.
[4] Keras, https://keras.io/, Accessed: 16.12.2022.
[5] PyTorch, https://pytorch.org/, Accessed: 16.12.2022.
[6] CUDA, https://developer.nvidia.com/cuda-toolkit, Accessed: 16.12.2022.
[7] Theano, https://theano-pymc.readthedocs.io
[8] Theano Development Team, Theano: A Python framework for fast computation of mathematical expressions, arXiv: http://arxiv.org/abs/1605.02688
[9] S. Weidman, Deep Learning from Scratch: Building with Python from First Principles, O'Reilly Media, 2019.
[10] H. Ramsauer, B. Schäfl, J. Lehner, P. Seidl, M. Widrich, T. Adler, L. Gruber, M. Holzleitner, M. Pavlović, G. K.
Sandve, V. Greiff, D. Kreil, M. Kopp, G. Klambauer, J. Brandstetter, S. Hochreiter, Hopfield Networks is All You Need, https://arxiv.org/abs/2008.02217
[11] GitHub Hopfield network implementation, https://github.com/ml-jku/hopfield-layers, Accessed: 16.12.2022.
[12] G. Vieira, M. E. Valle, Acute Lymphoblastic Leukemia Detection Using Hypercomplex-Valued Convolutional Neural Networks, arXiv: https://arxiv.org/abs/2205.13273 (2022).
[13] Hypercomplex-Valued Convolutional Neural Networks, https://github.com/mevalle/Hypercomplex-valued-Convolutional-Neural-Networks, Accessed: 16.12.2022.
[14] S. Raschka, V.
Mirjalili, Python Machine Learning, Packt Publishing, 3rd edition, 2019.
[15] S. Raschka, V. Mirjalili, Machine Learning with PyTorch and Scikit-Learn, Packt Publishing, 1st edition, 2022.
diff --git a/xNAzT4oBgHgl3EQf7v4z/vector_store/index.pkl b/xNAzT4oBgHgl3EQf7v4z/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..26614382b8678b1078bf952b8984c5642067a414
--- /dev/null
+++ b/xNAzT4oBgHgl3EQf7v4z/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a165fa705b486ad0313bfb9c87b2344b66795091615b1b2970e7c65d6b3cff8e
+size 163408
diff --git a/xNFIT4oBgHgl3EQfzyta/content/tmp_files/2301.11366v1.pdf.txt b/xNFIT4oBgHgl3EQfzyta/content/tmp_files/2301.11366v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1cb7264d8ac00eb7770e416d2e7b69349654e359
--- /dev/null
+++ b/xNFIT4oBgHgl3EQfzyta/content/tmp_files/2301.11366v1.pdf.txt
@@ -0,0 +1,998 @@

ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE
DONALD M. DAVIS AND MANYI GUO

Abstract. We prove that a face of a cube can be partitioned into 193 sets on which the cut locus, or ridge tree, is constant up to isomorphism as a labeled graph. These are 60 connected open sets and curves bounding them, and intersection points of curves. Polynomial equations for the curves are given. Sixteen pairs of sets support the same cut locus class. We present the 177 distinct cut locus classes.

Date: January 26, 2023.
Key words and phrases: cut locus, ridge tree, geodesic, cube, star unfolding, Voronoi diagram.
2000 Mathematics Subject Classification: 52B10, 53C22, 52C30.
arXiv:2301.11366v1 [math.MG] 26 Jan 2023

1. Introduction

The cut locus, or ridge tree, of a point P on a convex polyhedron is the set of points Q for which there is more than one shortest path from P to Q. Each cut locus is a tree whose leaves are corner points of the polyhedron. (We reserve the term vertex for vertices of the star unfolding and cut locus.) In [1] and [2], methods were developed for determining the cut locus of a point, which we describe in Section 3 and utilize.

The cut locus of P varies continuously with P unless P is a corner point of the polyhedron, but its combinatorial structure can change abruptly. We think of a cut locus as a graph with some vertices labeled by corner points of the polyhedron. We define an equivalence relation on these labeled graphs by edge-preserving vertex bijections that preserve labels, and denote by L the equivalence class of a cut locus L.

In this paper, we consider the cut loci for a cube and find a complete decomposition of a face of a cube into subsets on which L is constant. The subsets are connected open sets, curves bounding these sets, and single points where the curves intersect. These are accurately rendered in Figure 1.1; Figure 2.1 gives an expanded version of regions in the left quadrant of 1.1. We find that there are 193 subsets altogether, but
of these there are 16 pairs which have the same L, and so there are 177 distinct L on a face of a cube.

Figure 1.1. Decomposition of a face into subsets on which L is constant. [Figure not reproduced in this text extraction.]

In Section 2, we give a precise statement of results, including equations of the curves bounding the regions, and the labeled graphs for a representative set of L. In Section 3, we present some preliminary information and tools from [2] and [1] needed in our work. Section 5 gives a proof that the regions and isomorphism classes of their cut loci are as described. And in Section 6, we prove the completeness of this result.

In Figure 1.2, we picture a cube and a typical cut locus on it. This shows the numbering of the corner points of the cube that we will use throughout. We use the back face of that cube as the domain for our points P.

Figure 1.2. A cube with labeled corner points, and the cut locus for the middle point of an edge highlighted. [Figure not reproduced in this text extraction.]

One motivation for this work was [3], which considered geodesic motion-planning rules on a cube. Another was [1], which considered bounds for the number of equivalence classes of cut loci on a convex polyhedron.

2. Statement of results

In this section, we state our main result, a complete classification of all isomorphism classes, L, of cut loci for the cube, and, for each, the set of points P for which the cut locus of P is in L, partitioning a face into 193 connected subsets with constant L. Proofs of all claims will appear in Section 5. Because of the omnibus nature of our result, we do not organize it into "Theorems."

It suffices to consider points on one face of the cube. We show that a face of the cube is composed of 60 connected open sets on which L is constant, together with 48 curves which bound these regions. Except for the boundary of the square and its diagonals, each of these curves is given by a 2-variable polynomial equation of degree 2 or 3 with integer coefficients. Some of these curves have constant L, while others are divided into two or three adjacent portions, on each of which L is constant, yielding 96 curve portions with constant L. There are 58 distinct L's on the regions and 86 on the curves. There are 37 points of intersection of these curves, giving 33 additional L.

We find it convenient to use 0 ≤ x ≤ 8 and −4 ≤ y ≤ 4 as the coordinates of P = (x, y) in our face. Figure 2.1 depicts the 15 open regions in the quadrant Q1 = {(x, y) : 0 ≤ x ≤ 4, |y| ≤ 4 − x}. In Figure 2.1, the x-axis is stretched by a factor of nearly 5 in order to better display the regions. Figure 1.1 depicts the whole square, illustrating how regions in the other three quadrants are copies of the regions in the quadrant Q1 rotated around the center of the square. We will explain how the L in the regions in these quadrants are obtained by permuting the corner numbers 1-8 in L.

Figure 2.1. Regions in quadrant Q1. [Plot of regions A-I and their primed reflections over roughly 0 ≤ x ≤ 0.8, −4 ≤ y ≤ 4; not reproduced in this text extraction.]

In Figure 2.2, we present the L for points in regions A-I in Figure 2.1.
The L in the primed regions in Figure 2.1 are obtained by applying the permutation τ = (1 4)(2 3)(5 8)(6 7) to the corner numbers in the L of the corresponding unprimed region D-I. Note that the graphs which appear in Figure 2.2 represent isomorphism classes of labeled graphs, and so whether an edge points to the left or right is irrelevant, as is the vertical orientation of the graph.

Figure 2.2. L in regions. [Labeled trees for regions A, B, C, D, E, F, G, H, I; not reproduced in this text extraction.]

Each region R in the top quadrant in Figure 1.1 is obtained from the corresponding region R0 in quadrant Q1 by a clockwise rotation of π/2 around the center of the square. The L for R is obtained from that of R0 by applying the permutation σ = (1 4 3 2)(5 8 7 6) to the corner numbers at vertices. Similarly, regions along the right edge are a π-rotation of R0 and have their L obtained using the permutation σ^2 = (1 3)(2 4)(5 7)(6 8). Finally, a clockwise rotation of 3π/2 applies σ^3 = (1 2 3 4)(5 6 7 8) to the numbers at vertices of L. One can check that, for the 15 regions R0 in Q1, the L for σ^i R0, 0 ≤ i ≤ 3, are distinct except that σ^(2+ε) L_A = σ^ε L_A for ε = 0, 1, yielding 58 distinct L for the regions on the face, each L having six degree-3 vertices. The notation L_A refers to the L of points in the region A.

There are five curves and their vertical reflections which bound pairs of regions in Figure 2.1. A single curve usually bounds more than one pair of regions, and its L will be different for different pairs. In every case, the L for the curve is obtained by collapsing to a point one segment of the L for each region which it bounds.

For each curve, we list in (2.3) the pairs of regions which it bounds, followed by its equation. Then in Figure 2.4, we present the L for the various curve portions. For example, the first curve, which appears almost horizontal in Figure 1.1 but is actually an arc of a circle with large radius, bounds regions B and D, and then has a short portion bounding regions E and I, and its L for each of these portions is presented in Figure 2.4. The intersection point of these two portions has a different L, which will be described, along with its coordinates, later in this section.

(2.3)
BD, EI:        x^2 + y^2 − 24y + 16 = 0
DE, BI, CI':   y^3 + (3x + 12)y^2 + (x^2 + 40x − 16)y + 3x^3 − 44x^2 + 304x − 192 = 0
EF:            y^3 + (x − 12)y^2 + (x^2 + 8x − 16)y + x^3 − 20x^2 − 240x + 192 = 0
FG, HA, CH':   x^3 − 4x^2 + (y^2 + 8y − 80)x − 4y^2 + 64 = 0
GA, FH:        x^3 − 12x^2 + (y^2 − 24y + 112)x + 4y^2 − 64 = 0

Figure 2.4. L on curves. [Labeled trees for the curve portions BD, EI, DE, BI, CI', EF, FG, HA, CH', GA, and FH; not reproduced in this text extraction.]

The vertical reflection of the curves is obtained by replacing y by −y in the equations, and their L is obtained using the permutation τ = (1 4)(2 3)(5 8)(6 7), as before.
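The curves in (2.3) are easy to inspect numerically. The following Python sketch (not part of the paper, which relies on Maple) draws their zero sets over the strip 0 ≤ x ≤ 1, −4 ≤ y ≤ 4 containing the relevant part of quadrant Q1; the grid resolution and plotting range are arbitrary choices.

```python
import numpy as np
import matplotlib.pyplot as plt

# Implicit equations from (2.3): each curve is the zero set of f(x, y).
curves = {
    "BD,EI":     lambda x, y: x**2 + y**2 - 24*y + 16,
    "DE,BI,CI'": lambda x, y: (y**3 + (3*x + 12)*y**2 + (x**2 + 40*x - 16)*y
                               + 3*x**3 - 44*x**2 + 304*x - 192),
    "EF":        lambda x, y: (y**3 + (x - 12)*y**2 + (x**2 + 8*x - 16)*y
                               + x**3 - 20*x**2 - 240*x + 192),
    "FG,HA,CH'": lambda x, y: x**3 - 4*x**2 + (y**2 + 8*y - 80)*x - 4*y**2 + 64,
    "GA,FH":     lambda x, y: x**3 - 12*x**2 + (y**2 - 24*y + 112)*x + 4*y**2 - 64,
}

x, y = np.meshgrid(np.linspace(0, 1, 400), np.linspace(-4, 4, 400))
for name, f in curves.items():
    plt.contour(x, y, f(x, y), levels=[0])   # level set f = 0
plt.xlabel("x"); plt.ylabel("y"); plt.title("Zero sets of the curves in (2.3)")
plt.show()
```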
For the other three quadrants, the equations can be modified in an obvi- +ous way, and the L obtained using the same permutations as were used for regions. +One can check that, for the 11 curve segments s in Figure 2.4 and for ε = 0, 1 and +0 ≤ i ≤ 3, the L for τ εσis are distinct except that τ εσ2+iLGA = τ εσiLHA. This gives +88 − 8 distinct L’s for curve portions. All of these L’s have five degree-3 vertices. +In addition, there are 6 more L’s, coming from the edges and half-diagonals of the +square. The entire left edge of our face has constant L, as does the half diagonal + +ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE +7 +h connecting the center of the face with the upper left corner. +These are shown +in Figure 2.5. These are the first cases where a corner point does not appear at a +leaf, but rather at a degree-2 vertex of the cut locus. Applying the permutations +σ, σ2, and σ3 described earlier gives the L’s on the other edges and half diagonals. +However, σ2+εLh = σεLh. Combining with those described above yields 86 distinct +L’s on portions of curves. +Figure 2.5. L on left edge and upper-left half diagonal. +8 +4 +1 +5 +7 +6 +3 +2 +8 +3 +7 +6 +1 +5 +2 +4 +left edge +half diagonal +Not including the y-axis, Figure 2.1 has eight intersection points of curves. Three +below the x-axis are obtained by vertical reflection, and their L is obtained using the +usual permutation τ. We list the other five, notating them by the regions abutting +them, and include their coordinates. +BDEI +(0.6413, 0.7045) +EFHCI +(0.7085, 0.7085) +FGHA +(0.8, 1.6) +BII′C +(0.6989, 0) +CHH′A +(0.7757, 0) +More precisely, the 0.7085 is 6 − 2 +√ +7, while the 0.7045, 0.6989, and 0.7757 are roots +of the polynomials 37y4 − 816y3 + 304y2 − 3456y + 2560, 3x3 − 44x2 + 304x − 192, +and x3 − 4x2 − 80x + 64, respectively. The L for the five vertices are shown in Figure +2.6. The BD curve intersects the y-axis at (0, 0.685), but this point does not give a +new L, since L is constant on the y-axis (for |y| < 4). + +8 +DONALD M. DAVIS AND MANYI GUO +Figure 2.6. L for intersection points. +8 +4 +5 +3 +7 +1 +6 +2 +BDEI +4 +2 +7 +1 +5 +6 +8 +3 +EFHCI +4 +3 +1 +2 +8 +7 +5 +6 +FGHA +3 +4 +1 +2 +7 +8 +5 +6 +CHH′A +8 +5 +7 +4 +1 +6 +2 +3 BII′C +Including τL for the first three L in Figure 2.6 and applying σi, 0 ≤ i ≤ 3 to all +gives 32 − 4 distinct L, as τ εσ2+iLFGHA = τ εσiLFGHA. +Finally, there are vertices at the center of the face and at each corner. The L for +the center and the top-left corner are presented in Figure 2.7. Those for the other +corners are obtained using the usual permutations. +Figure 2.7. L for special points. +8 +4 +7 +3 +6 +2 +1 +5 +center +4 +3 +7 +1 +5 +6 +2 +corner +3. Background +In this section we explain how the method for finding cut loci of convex polyhedra +developed in [1] and [2] applies to a cube. This involves star unfolding and Voronoi +diagrams. +We consider the cube with corner points numbered as in Figure 1.2, and let P be a +point on the back (5678) face. In a planar model M of all faces of the cube except the +front (1234) face, choose a shortest path connecting P to each corner point. These +are called cuts. See Figure 3.1. + +ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE +9 +Figure 3.1. Example of cuts with respect to P +3 +2 +1 +4 +3 +2 +1 +4 +P +5 +8 +6 +7 +These decompose M as the union of eight polygons, with P at a vertex of each, +and edges 12, 23, 34, 41 and 15, 26, 37, and 48 at far ends of the polygons. 
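Before continuing with the construction, we note that the intersection-point data quoted in Section 2 is easy to confirm numerically. The sketch below (Python rather than the paper's Maple; the bracketing intervals and tolerances are chosen by us) checks that 6 − 2√7 ≈ 0.7085, that 0.7045, 0.6989, and 0.7757 agree with roots of the three stated polynomials, and that the point (0.8, 1.6) lies on both cubics from (2.3) which meet at FGHA.

# Numerical check of the intersection-point data quoted in Section 2.
# Plain bisection; brackets and tolerances are chosen ad hoc here.

def bisect(f, lo, hi, tol=1e-12):
    flo = f(lo)
    assert flo * f(hi) < 0, "root not bracketed"
    while hi - lo > tol:
        mid = (lo + hi) / 2
        if flo * f(mid) <= 0:
            hi = mid
        else:
            lo, flo = mid, f(mid)
    return (lo + hi) / 2

# 6 - 2*sqrt(7), the coordinate of the EFHCI point
assert abs((6 - 2 * 7 ** 0.5) - 0.7085) < 1e-4

# y-coordinate of BDEI: root of 37y^4 - 816y^3 + 304y^2 - 3456y + 2560
p_bdei = lambda y: 37 * y**4 - 816 * y**3 + 304 * y**2 - 3456 * y + 2560
assert abs(bisect(p_bdei, 0.70, 0.71) - 0.7045) < 1e-4

# x-coordinate of BII'C: root of 3x^3 - 44x^2 + 304x - 192
p_biic = lambda x: 3 * x**3 - 44 * x**2 + 304 * x - 192
assert abs(bisect(p_biic, 0.69, 0.71) - 0.6989) < 1e-4

# x-coordinate of CHH'A: root of x^3 - 4x^2 - 80x + 64
p_chha = lambda x: x**3 - 4 * x**2 - 80 * x + 64
assert abs(bisect(p_chha, 0.77, 0.78) - 0.7757) < 1e-4

# FGHA = (0.8, 1.6) lies on both the FG/HA/CH' and GA/FH cubics of (2.3)
x, y = 0.8, 1.6
fg = x**3 - 4 * x**2 + (y**2 + 8 * y - 80) * x - 4 * y**2 + 64
ga = x**3 - 12 * x**2 + (y**2 - 24 * y + 112) * x + 4 * y**2 - 64
assert abs(fg) < 1e-9 and abs(ga) < 1e-9

print("intersection-point data checks out")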
The star +unfolding of P is obtained by first gluing to the 1234 square the polygons with far +edges 12, 23, 34, and 41. This will expose new edges 15, 26, 37, and 48, and we then +glue the other four polygons to the corresponding edges. See Figure 3.2. This yields +a polygon with eight vertices corresponding to corner points of the cube, and eight +corresponding to occurrences of the point P, which we number as in Figure 3.2. This +is the star unfolding, S, of the point P. +Figure 3.2. A star unfolding S +1 +2 +3 +4 +5 +6 +7 +8 +P1 +P2 +P3 +P4 +P5 +P6 +P7 +P8 +Recall that our coordinates for the 5678 face of the cube are 0 ≤ x ≤ 8 and +−4 ≤ y ≤ 4. +We will initially consider points P in the quadrant Q1 given by + +10 +DONALD M. DAVIS AND MANYI GUO +−4 ≤ y ≤ 4 and 0 ≤ x ≤ 4 − |y|. Points in other quadrants will be considered later +by rotating the cube. +We use (v, w) as the coordinate system for the plane containing S, with (0, 0) at +the midpoint of segment 2-3 in Figure 3.2, and sides of the two squares having length +8. The coordinates of the points labeled 1-8 are, respectively, (−8, 4), (0, 4), (0, −4), +(−8, −4), (−8, 12), (8, 4), (8, −4), and (−8, −12). The coordinates (vα, wα) of the +points Pα are as in (3.3). +P1 = (−16 − x, −y), +P5 = (16 − x, −y), +P2 = (−12 − y, 12 + x), +P6 = (12 − y, −12 + x), +(3.3) +P3 = (−8 + x, 16 + y), +P7 = (−8 + x, −16 + y), +P4 = (12 + y, 12 − x), +P8 = (−12 + y, −12 − x). +For each point Pα, 1 ≤ α ≤ 8, its Voronoi cell Cα is the set of points Q in S such +that +d(Q, Pα) ≤ d(Q, Pβ) +for 1 ≤ β ≤ 8. The points of S which lie in more than one Cα comprise the cut locus +LP of P. In Figure 3.4, we show the Voronoi cells and cut locus of the point P in +Figure 3.1. +Figure 3.4. Voronoi cells and cut locus of P +1 +2 +3 +4 +5 +6 +7 +8 +P1 +P2 +P3 +P4 +P5 +P6 +P7 +P8 + +ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE +11 +All segments in the cut locus are portions of perpendicular bisectors ⊥α,β of the +segments joining Pα and Pβ. One needs to consider how various ⊥α,γ intersect to +decide when a portion of ⊥α,β is closer to Pα and Pβ than to other Pγ’s. An example +of this is discussed in Section 4. +4. Determination of a cut locus +We used Maple to help us find the cut locus for many points in quadrant Q1. We +illustrate here with the top half of the cut locus of the point P = (x, y) = (1.5, 0.5). +Substituting these values into the equations (3.3), we obtain the coordinates of the +points Pα = (vα, wα) for 1 ≤ α ≤ 8. The equation of the perpendicular bisector, ⊥α,β, +of the segment connecting points Pα and Pβ is +w = +� +� +� +wα + wβ +2 ++ vα − vβ +wβ − wα +� +v − vα + vβ +2 +� +{α, β} ̸= {1, 5} +−x +{α, β} = {1, 5}. +(4.1) +Maple plots a selected batch of these lines ⊥α,β in a specified grid. The grid in +Figure 4.2 is [−3.5, 0.5] × [0, 3]. Here we have included just those relevant for the top +half of the cut locus, which appears in red. Other lines such as ⊥2,4 and ⊥2,5 would +usually be considered for possible relevance. Trying to do this sort of analysis for the +top and bottom halves of the cut locus together leads to an unwieldy collection of +perpendicular bisectors. In Section 6, we show that it suffices to consider the top and +bottom parts separately. When crucial intersection points are very close together, we +can change the grid to effectively zoom in. + +12 +DONALD M. DAVIS AND MANYI GUO +Figure 4.2. Finding a cut locus. +2 3 +21 +3 +1 +4 +5 +3 4 +35 +1 5 +Points equidistant from Pα and Pβ lie on ⊥α,β. 
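A point equidistant from three of the P_α is the circumcenter of those three points, and these circumcenters are the candidate degree-3 vertices of the cut locus. The following sketch (Python; the helper names are ours) implements the coordinates (3.3) and checks, for the example point P = (1.5, 0.5), that the circumcenters of {P1, P2, P3}, {P1, P3, P5}, and {P3, P4, P5} are equidistant from their three defining points and strictly farther from every other P_β, consistent with the description of this cut locus given below, whose top half has vertices π_{1,2,3}, π_{1,3,5}, and π_{3,4,5}.

# Coordinates (3.3) of the points P_alpha in the star unfolding, as functions
# of P = (x, y) on the 5678 face, together with a circumcenter computation.

def star_points(x, y):
    return {
        1: (-16 - x, -y),     5: (16 - x, -y),
        2: (-12 - y, 12 + x), 6: (12 - y, -12 + x),
        3: (-8 + x, 16 + y),  7: (-8 + x, -16 + y),
        4: (12 + y, 12 - x),  8: (-12 + y, -12 - x),
    }

def circumcenter(a, b, c):
    (ax, ay), (bx, by), (cx, cy) = a, b, c
    d = 2 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
    ux = ((ax**2 + ay**2) * (by - cy) + (bx**2 + by**2) * (cy - ay)
          + (cx**2 + cy**2) * (ay - by)) / d
    uy = ((ax**2 + ay**2) * (cx - bx) + (bx**2 + by**2) * (ax - cx)
          + (cx**2 + cy**2) * (bx - ax)) / d
    return ux, uy

def dist(p, q):
    return ((p[0] - q[0])**2 + (p[1] - q[1])**2) ** 0.5

P = star_points(1.5, 0.5)
for S in [(1, 2, 3), (1, 3, 5), (3, 4, 5)]:
    v = circumcenter(*(P[a] for a in S))
    r = dist(v, P[S[0]])
    # equidistant from the three defining points ...
    assert all(abs(dist(v, P[a]) - r) < 1e-9 for a in S)
    # ... and strictly closer to them than to every other P_beta
    assert all(dist(v, P[b]) > r + 1e-9 for b in P if b not in S)
    print(S, "-> vertex at (%.3f, %.3f)" % v)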
+In Figure 4.2, the line ⊥α,β is +annotated with α on one side and β on the other, indicating the side closer to Pα or +Pβ. The Voronoi cell for a point Pα is bounded by portions of lines ⊥α,β for various β, +with α on the cell side of each ⊥α,β. For example, the Voronoi cell for P3 is bounded +by portions of ⊥3,2, ⊥3,1, ⊥3,5, and ⊥3,4, reading from left to right in Figure 4.2. +Although we use the various ⊥α,β to determine the cut loci, the eventual description +of the cut locus is in terms of the corner points of the cube at certain vertices of the +cut locus. As seen in Figure 3.2, the corner points on lines ⊥1,2, ⊥2,3, ⊥3,4, and ⊥4,5 +are 1, 5, 2, and 6, respectively, and so the top half of the cut locus of the point +P = (1.5, 0.5) is as depicted in Figure 4.3. +Figure 4.3. Top half of a cut locus. +1 +5 +2 +6 +5. Proofs +In this section, we show how the regions and curves and their cut loci are obtained. + +ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE +13 +The coordinate systems are as described in Section 3. Let [8] = {1, 2, 3, 4, 5, 6, 7, 8}. +For P = (x, y) ∈ Q1 and α ∈ [8], let Pα be the point in the star unfolding described +earlier. Its coordinates (vα, wα) are linear expressions, (3.3), in x and y. In Figure +3.2, we depict a typical star unfolding. The vertices P1, . . . , P8 are the focal points for +the Voronoi cells, while the vertices with numbered labels correspond to the corner +points of the cube. +For α, β ∈ [8], let ⊥α,β (P) denote the perpendicular bisector of the segment con- +necting Pα and Pβ. Its equation is (4.1). Note that ⊥α,α+1 has as its extreme point +in the star unfolding the corner point 1, 5, 2, 6, 7, 3, 8, and 4, for α = 1, . . . , 8. +Although our results about L are described in terms of the corner points, our work +is done in terms of the α. +For S = {α, β, γ} ⊂ [8], let πS(P) denote the intersection of ⊥α,β (P) and ⊥β,γ (P) +(and ⊥α,γ (P), as πS(P) is the center of the circle passing through Pα, Pβ, and Pγ.). +Let LP denote the cut locus of P. It is formed from portions of various ⊥α,β (P) +which are closer to Pα and Pβ than to any other Pγ. The degree-3 vertices of LP are +certain πS(P). From now on, we will usually omit the (P) and the set symbols in +subscripts. +A transition from one isomorphism class of LP to another as P varies will occur +when πα,β,γ passes through another ⊥β,δ. +This is illustrated in Figure 5.1. +The +references to t and t0 will be used in Section 6. In the left side of the figure, πα,β,γ is +part of LP since it is closer to Pα, Pβ, and Pγ than to any other P-point, but as P +changes and πα,β,γ moves across ⊥β,δ, it is now closer to Pδ, and so is not part of LP. + +14 +DONALD M. DAVIS AND MANYI GUO +Figure 5.1. Transition. +πα,β,γ +α +γ +γ β +β α +δα +δβ +δ +γ +t < t0 +t = t0 +t > t0 +γ β +γ α +β α +γδ +αδ +βδ +πα,β,γ +γ α +δβ +β α +γ β +δα +δγ +We will show in Section 6 that this type of transition is the only way to change from +one L to another. +The v-coordinate of πα,β,γ is found by equating the right hand side of (4.1) for +(α, β) and (β, γ), using the formulas for vα, wα, etc., in terms of x and y given in +(3.3). This yields a formula for v = vα,β,γ in terms of x and y. We assume first that +{α, β, γ} does not contain both 1 and 5. +The relationship between x and y such that πα,β,γ and πα,β,δ coincide (and hence +a transition might occur) is the equation vα,β,γ = vα,β,δ. This yields a fourth-degree +equation. We let Maple do the work. 
We find the equation for (α, β, γ, δ) = (1, 2, 3, 4) and for (2, 3, 4, 5) as follows.

v[1]:=-16-x: w[1]:=-y: v[2]:=-12-y: w[2]:=12+x: v[3]:=-8+x: w[3]:=16+y:
v[4]:=12+y: w[4]:=12-x: v[5]:=16-x: w[5]:=-y:
A:=solve((w[a]+w[b])/2+(v[a]-v[b])/(w[b]-w[a])*(v-(v[a]+v[b])/2)
  =(w[a]+w[c])/2+(v[a]-v[c])/(w[c]-w[a])*(v-(v[a]+v[c])/2),v):
B:=solve((w[a]+w[b])/2+(v[a]-v[b])/(w[b]-w[a])*(v-(v[a]+v[b])/2)
  =(w[a]+w[d])/2+(v[a]-v[d])/(w[d]-w[a])*(v-(v[a]+v[d])/2),v):
simplify(numer(A)*denom(B)-numer(B)*denom(A))

Note that the expressions for A and B will be rational expressions, and so the last line gives a polynomial which equals 0. For (a, b, c, d) = (1, 2, 3, 4), this yields

4(y^3 + (12 − x)y^2 + (x^2 + 8x − 16)y − x^3 + 20x^2 + 240x − 192)(4 + y + x) (= 0)

and for (a, b, c, d) = (2, 3, 4, 5), it yields

(y^3 + (3x + 12)y^2 + (x^2 + 40x − 16)y + 3x^3 − 44x^2 + 304x − 192)(4 + y − x) (= 0).

The cubic factor of the 2345 curve is the second of the five equations listed in (2.3). The 1234 equation here gives the vertical reflection of the EF equation in (2.3).

For {1, β, γ, 5}, since ⊥_{1,5} is the line v = −x, we do A above for 1, b, and c, omit B, and simplify (numer(A) + x·denom(A)). This yields (beginning a practice of often omitting commas)

(5.2)
1235:  x^3 − 4x^2 + (y^2 + 8y − 80)x − 4y^2 + 64 (= 0)
1245:  x(x^2 + y^2 + 24y + 16) (= 0)
1345:  x^3 − 12x^2 + (y^2 + 24y + 112)x + 4y^2 − 64 (= 0).

For each of these five cases, if 2, 3, and 4 are replaced by 8, 7, and 6, respectively, the equation is obtained by replacing y by −y. Altogether we have ten equations. Compare with equations (2.3).

We describe L_P for P in the top half of Q1 by the sets S for which π_S is a degree-3 vertex of L_P. Later in this section we will explain how we translate this description to the description involving corner points of the cube, which appeared in Section 2. For example, the case P = (1.5, 0.5) considered in Section 4 has π_S for S = 123, 135, and 345 in its top half. Maple plotting of perpendicular bisectors shows that the bottom half of this L_P is essentially a flip of the top half, so has π_S for S = 178, 157, and 567.

In Section 6, we show that the only possible transitions from one L to another are of the type illustrated in Figure 5.1, where an αβγδ intersection bounds one region whose L has π_{α,β,γ} and π_{α,γ,δ} vertices and another with π_{α,β,δ} and π_{β,γ,δ}. The point is that the 4-set² defining the bounding curve must have two 3-subsets in each of the regions on either side of it. So, for example, the 1568 curve could not bound a region containing L_{(1.5,0.5)} because there are not two of the six 3-sets S for L_{(1.5,0.5)} listed in the previous paragraph which are contained in {1, 5, 6, 8}.

²We use this to denote a set with 4 elements.

Of the ten equations determined above, all except the ones corresponding to 1568 and 1245 intersect the top half of Q1 in a curve which we denote as x = θ_{αβγδ}(y) for 0 ≤ y ≤ 4. Each y has three x values as solutions, but we neglect those that are complex or outside the region 0 ≤ x ≤ 4 − y. The equation for 1245 does not intersect this region, and the one for 1568 does so only for 0.685 ≤ y ≤ 1.07.

Maple shows that, for 1.6 < y < 4,

θ_1345(y) < θ_2345(y) < θ_1578(y) < θ_1678(y) < θ_5678(y) < θ_1234(y) < θ_1235(y) < θ_1567(y),

and that for 0 ≤ y ≤ 4 all eight of these curves satisfy 0 ≤ θ_{αβγδ}(y) ≤ 0.83.
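As a spot check of the 2345 equation obtained above, note that on its cubic factor the vertices π_{2,3,4} and π_{2,3,5} coincide, so P2, P3, P4, P5 should be concyclic there. The sketch below (Python; the choice y = 1 and the bisection bracket are ours) solves the cubic for x at y = 1 and confirms that P5 then lies on the circle through P2, P3, P4.

# On the cubic factor of the 2345 equation, P2, P3, P4, P5 lie on a common circle.
# Spot check at y = 1.

def cubic_2345(x, y):
    return (y**3 + (3*x + 12)*y**2 + (x**2 + 40*x - 16)*y
            + 3*x**3 - 44*x**2 + 304*x - 192)

def bisect(f, lo, hi, tol=1e-13):
    flo = f(lo)
    assert flo * f(hi) < 0
    while hi - lo > tol:
        mid = (lo + hi) / 2
        if flo * f(mid) <= 0:
            hi = mid
        else:
            lo, flo = mid, f(mid)
    return (lo + hi) / 2

y = 1.0
x = bisect(lambda t: cubic_2345(t, y), 0.5, 0.7)   # x is roughly 0.605

# coordinates (3.3) of P2, P3, P4, P5 for this (x, y)
P2 = (-12 - y, 12 + x)
P3 = (-8 + x, 16 + y)
P4 = (12 + y, 12 - x)
P5 = (16 - x, -y)

def circumcenter(a, b, c):
    (ax, ay), (bx, by), (cx, cy) = a, b, c
    d = 2 * (ax*(by - cy) + bx*(cy - ay) + cx*(ay - by))
    ux = ((ax*ax + ay*ay)*(by - cy) + (bx*bx + by*by)*(cy - ay)
          + (cx*cx + cy*cy)*(ay - by)) / d
    uy = ((ax*ax + ay*ay)*(cx - bx) + (bx*bx + by*by)*(ax - cx)
          + (cx*cx + cy*cy)*(bx - ax)) / d
    return ux, uy

c = circumcenter(P2, P3, P4)
r234 = ((c[0] - P2[0])**2 + (c[1] - P2[1])**2) ** 0.5
r5 = ((c[0] - P5[0])**2 + (c[1] - P5[1])**2) ** 0.5
assert abs(r234 - r5) < 1e-8    # P5 lies on the circle through P2, P3, P4
print("x =", round(x, 6), " |r234 - r5| =", abs(r234 - r5))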
For P +in quadrant Q1, LP has the type of the case P = (1.5, 0.5) considered above, with +degree-3 vertices corresponding to S = 123, 135, 345, 178, 157, and 567, until a +transition occurs. This will define region A in Figure 2.1. +Now let 1.6 < y < 4. Since there are no αβγδ intersections of the eight types in the +above string of inequalities in the region R = {(x, y) : 0 ≤ y ≤ 4, 0.83 ≤ x < 4 − y}, +and, as noted above, a 1568 intersection cannot affect L(1.5,0.5), we conclude that for +all (x, y) in R, L(x,y) = L(1.5,0.5), with degree-3 vertices 123, 135, 345, 178, 157, and +567. For this, we also need an observation in Section 6 that no other αβγδ can have +an effect. +As we move from the right, when the point P = (θ1567(y), y) is encountered, there +is a transition from 157 and 567 to 156 and 167. This is region G, with 123, 135, 345, +178, 156, and 167. +Next we encounter P = (θ1235(y), y), and this causes a transition to 125, 235, 345, +178, 156, and 167. This is region F. The next two potential transitions at 1234 +and 5678 do not effect a change, because neither of these 4-sets contain two 3-subsets +which are vertices of region-F cut loci. Next at P = (θ1678(y), y) we have a transition, +changing 178 and 167, leading to region E described by 125, 235, 345, 168, 156, and +678. The next potential transition, 1578, does not effect a change, but then 2345 +does, to 125, 234, 245, 168, 156, and 678 in region D. Finally, 1345 does not effect a +change because it does not have two 3-sets of region D. +Before we discuss other ranges of values of y, we point out that when a curve is +crossed, it gives a degree-4 vertex of the cut locus, as shown in the middle part of + +ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE +17 +Figure 5.1. Thus, for points P on the curve GA separating regions G and A, LP +has vertices abutting regions 123, 135, 345, 178, and 1567 of the star unfolding, and +similarly for points on the other curves crossed in the above analysis. We also note +that θ1567(1.6) = 0.8 = θ1235(1.6). +The same procedure is followed for other intervals of values of y, arranging the +4-sets S according to the order of θS(y), and then working from right-to-left to see +whether the transitions are effective, i.e., whether S contains two 3-sets which are +vertices of the region under consideration. For 0.7085 < y < 1.6, the only change +from the above order which causes a different transition is that θ1235(y) is now greater +than θ1567(y), so the 1235 change takes place first, leading to region H with vertices +125, 235, 345, 157, 567, and 178. +The most interesting point is (6 − 2 +√ +7, 6 − 2 +√ +7) ≈ (.7085, .7085), which lies on +all of θ1567, θ1678, θ1568, θ1578, and θ5678.3 These five curves reverse their order at +y = 6 − 2 +√ +7. For 6 − 2 +√ +7 < y < .715, +θ2345(y) < θ1578(y) < θ1678(y) < θ5678(y) < θ1567(y) < θ1568(y) < θ1234(y) < θ1235(y), +which has the transitions described in the preceding paragraph, but for .7045 < y < +6 − 2 +√ +7, +θ2345(y) < θ1568(y) < θ1567(y) < θ5678(y) < θ1678(y) < θ1578(y) < θ1234(y) < θ1235(y), +which has a different order of transitions. Let .7045 < y < 6 − 2 +√ +7. After the 1235 +change, the next one is 1578, leading to region C with vertices 125, 235, 345, 158, +567, and 578. The next transition is due to 5678, leading to region I with vertices +125, 235, 345, 158, 568, and 678. 
The next transition is due to 1568, which brings us +into region E, with vertices 125, 235, 345, 168, 156, and 678, which were already seen +when considering larger values of y. Finally, a 2345 transition brings us into region +D as above. +The 2345 and 1568 curves intersect at y ≈ 0.7045, so for y < 0.7045, the 2345 +transition precedes the 1568 transition, leading to region B with vertices 125, 234, +245, 158, 568, and 678. For y > .685, there will be a 1568 transition into region D, +but for y < .685, there is no 1568 transition since θ1568(y) < 0 if y < .685. +3To see this remarkable fact, recall that these five curves are obtained by replac- +ing y by −y in the polynomials in (5.2) and the paragraph it. After doing this, let +y = x. Each of the resulting polynomials equals x2 − 12x + 8 times a linear factor. + +18 +DONALD M. DAVIS AND MANYI GUO +This completes the description of the regions of the top half of quadrant Q1 with +constant L, described in terms of the Voronoi cells. Now we translate this description +into one which has the cube’s corner numbers at the leaves, which is the description +given in Section 2, and is needed for giving permuted descriptions in other quadrants. +In Figure 5.3, we show how the top half of the cut loci appear in terms of Voronoi +cells, and list the regions in Figure 2.1 in which they appear. Each edge leading +to a leaf is a perpendicular bisector separating Voronoi cells i and i + 1 for some i +mod 8. For i = 1, 2, 3, 4, the corner point at the end of this bisector is 1, 5, 2, 6, +respectively, as can be seen in Figure 3.2. The reader can check that this labeled +diagram is consistent with the L in Figure 2.2. +Figure 5.3. Top half of cut loci. +1 +5 +3 +2 +4 +A, G +123, 135, 345 +1 +2 +3 +4 +5 +C, E, F, H, I +125, 235, 345 +1 +2 +5 +3 +4 +B, D +125, 234, 245 +In Figure 5.4, we do the same thing for the bottom half of cut loci in the top half +of Q1. The corner numbers at the ends of segments bounding Voronoi cells 5 and 6, +6 and 7, 7 and 8, and 8 and 1 are 7, 3, 8, and 4, respectively. +Figure 5.4. Bottom half of cut loci. +7 +6 +8 +1 +5 +A, H +157, 567, 178 +1 +5 +6 +7 +8 +C +158, 567, 578 +7 +6 +8 +5 +1 +B, I +158, 568, 678 +7 +6 +5 +1 +8 +D, E +156, 168, 678 +8 +7 +1 +5 +6 +F, G +156, 167, 178 +A similar discussion could be made for the L associated to the curves. But it is +easier and more insightful to note how the L for a curve bounding two regions is +obtained from that of each of the two regions by collapsing a segment in which the +two regions differ. For example, the L for the BD curve in Figure 2.4 is obtained + +ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE +19 +from those in region B or D in Figure 2.2 by collapsing the segment connecting the +edges leading to corner points 4 and 7. Similarly, the L for points of intersection of +two curves is obtained by collapsing a segment in the L of each. For example, the L +for point BDEI in Figure 2.6 is obtained from those of curves BD and EI in Figure +2.4 by collapsing in each the highest vertical interval. +The L’s in Figures 2.5 and 2.7 are different from those seen previously in that they +have a corner point labeling a degree-2 vertex. In these cases, the choice of cuts is +not unique, but, of course, the cut locus does not depend on the choice. We comment +briefly on the L in these cases. +If P is on the left edge of the cube, the L is as seen in Figure 1.2. 
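Before turning to the remaining special points, we note that the translation from the Voronoi description to corner-labeled trees is mechanical enough to automate. The sketch below (Python; the reconstruction rules are our reading of the description above, so it is an illustration rather than a tool used in the paper) rebuilds the tree for region A from its six triples 123, 135, 345, 178, 157, 567: two triples sharing a pair {α, β} are joined by an edge lying on the bisector of P_α and P_β, and each consecutive pair {α, α+1} occurring in a triple contributes a leaf edge ending at the corner point at the far end of that bisector (1, 5, 2, 6, 7, 3, 8, 4 for α = 1, ..., 8).

# Rebuild the corner-labeled tree for region A from its Voronoi description.

from itertools import combinations

triples_A = [frozenset(s) for s in [(1, 2, 3), (1, 3, 5), (3, 4, 5),
                                    (1, 7, 8), (1, 5, 7), (5, 6, 7)]]

# corner point at the far end of the bisector between cells alpha and alpha+1 (mod 8)
corner_at = {1: 1, 2: 5, 3: 2, 4: 6, 5: 7, 6: 3, 7: 8, 8: 4}

edges = []
# internal edges: two triples sharing a pair {alpha, beta}
for s, t in combinations(triples_A, 2):
    if len(s & t) == 2:
        edges.append((tuple(sorted(s)), tuple(sorted(t))))
# leaf edges: consecutive pairs contained in some triple
for alpha in range(1, 9):
    pair = {alpha, alpha % 8 + 1}
    owners = [s for s in triples_A if pair <= s]
    assert len(owners) == 1                    # each such pair occurs exactly once
    edges.append((tuple(sorted(owners[0])), "corner %d" % corner_at[alpha]))

leaves = sorted(int(b.split()[1]) for a, b in edges if isinstance(b, str))
assert leaves == [1, 2, 3, 4, 5, 6, 7, 8]      # all eight corners occur as leaves
assert len(edges) == 13                        # a tree on 6 + 8 vertices
for e in edges:
    print(e)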
If P is at a +corner point of the cube, the cut locus consists of segments from the corner point +opposite P to each of the other corner points. If P is at the center of a face F, the +cut locus consists of the diagonals of the opposite face F op and the four edges of the +cube connecting F and F op. +If P = (x, 4 − x) with 0 < x < 4 is on the half-diagonal, then ⊥4,5 is the line +w = 4, which intersects the point in the star-unfolding corresponding to corner point +2. Then the short segment connecting the point π3,4,5 in Figure 3.4 with the point +labeled 2 will have collapsed to a point. In the A diagram in Figure 2.2, this is the +collapse of the vertical segment from the point labeled 2. This can be seen in terms of +the Voronoi cells in the A-part of Figure 5.3. A similar thing happens to the vertical +segment leading to the point labeled 8, as the equation of ⊥7,8 is v = −8. +In Section 2, we discussed how a permutation τ (resp. σ) applied to corner points +yields L in the vertical flip (resp. 90-degree clockwise rotation) of a region or curve. +Here we give a brief explanation of the reason for that. Such a motion applied to +a point P in the region has the same effect on geodesics from P, and hence on LP. +Referring to Figure 3.1, we see that, for example, the corner point 8 in LP will be +replaced by 5 (resp. 7), which expands to the asserted permutations. +6. No other transitions +In this section, we present a proof that there are no regions other than those +described earlier. + +20 +DONALD M. DAVIS AND MANYI GUO +Suppose LP0 ̸= LP1. Let P(t) = (1 − t)P0 + tP1, and, for any 3-subset S of [8], let +πS(t) = πS(P(t)), a path in the vw-plane. For each S such that πS(0) is a vertex of +LP0, let +t0(S) = sup{t ∈ [0, 1] : πS(t′) is a vertex of LP(t′) ∀t′ < t}, +and let +t0 = min{t0(S) : πS(0) is a vertex of LP0}. +Finally, let S = {α, β, γ} satisfy t0(S) = t0. The first transition in moving from P0 +to P1 will involve πS(P(t0)). +Proposition 6.1. There exists δ ∈ [8] − S and a decomposition of S as {β} ∪ {α, γ} +such that π{α,γ,δ}(0) is a vertex of LP0, and πS(t0) = π{α,γ,δ}(t0) is a common vertex +of LP(t0). +Proof. There exists ε > 0 and δ ∈ [8] − S such that for t in the interval (t0, t0 + ε), +πS(t) is closer to Pδ than it is to Pα, Pβ, and Pγ. The path πS crosses ⊥δ,η (P(t0)) +for some η ∈ [8], and η must equal α, β, or γ, since for t in some interval (t0 − ε′, t0), +πS(t) is closer to Pα, Pβ, and Pγ than it is to any other Pη. Without loss of generality, +say η = β. Then πS(t0) intersects ⊥β,δ (P(t0)), and so all six perpendicular bisectors +from {α, β, γ, δ} intersect in LP(t0). By minimality of t0, since π{α,γ,δ}(t0) ∈ LP(t0), we +conclude that π{α,γ,δ}(0) ∈ LP0. See Figure 5.1 for a depiction of this transition. +Theorem 6.2. There are no transitions except those claimed earlier in the manu- +script. +Proof. Let S1 = {2, 3, 4}, S2 = {1, 5}, and S3 = {6, 7, 8}. Recall that all of our +asserted regions in Q1 have L with three vertices from S1 ∪S2 and three from S2 ∪S3. +If LP0 ̸= LP1 with LP0 in one of our regions, and t0 is as above, so that we are +considering the first transition in moving from P0 to P1, then the set {α, β, γ, δ} +involved in the transition must either contain S2 or else equal one of {1, 2, 3, 4}, +{2, 3, 4, 5}, {1, 6, 7, 8}, or {5, 6, 7, 8}. This is true since sets with elements of type +S1S2S3S3, S1S1S2S3, S1S1S3S3, S1S1S1S3, or S1S3S3S3 do not contain two 3-subsets +of the type of the vertices of LP0. 
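This combinatorial step is easy to verify by brute force. The sketch below (Python; our own check, not part of the paper's Maple work) enumerates all 4-subsets of [8] and confirms that those containing two 3-subsets lying entirely in S1 ∪ S2 or entirely in S2 ∪ S3 are exactly the sets containing S2 = {1, 5}, together with {1, 2, 3, 4}, {2, 3, 4, 5}, {1, 6, 7, 8}, and {5, 6, 7, 8}.

# Brute-force check of the claim above.

from itertools import combinations

S1, S2, S3 = {2, 3, 4}, {1, 5}, {6, 7, 8}
A, B = S1 | S2, S2 | S3          # vertex triples lie in A or in B

def good_triples(T):
    return [set(s) for s in combinations(T, 3) if set(s) <= A or set(s) <= B]

qualifying = [set(T) for T in combinations(range(1, 9), 4)
              if len(good_triples(T)) >= 2]

expected = [T | S2 for T in map(set, combinations(range(1, 9), 2)) if not (T & S2)]
expected += [{1, 2, 3, 4}, {2, 3, 4, 5}, {1, 6, 7, 8}, {5, 6, 7, 8}]

assert sorted(map(sorted, qualifying)) == sorted(map(sorted, expected))
print(len(qualifying), "4-sets can define a transition curve")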
In our earlier determination of the regions in Q1, we considered the four specific sets listed above (containing a single 1 or 5), and also all sets with elements of type S1S1S2S2 and S2S2S3S3. It remains to consider {1, 5, α, β} with α ∈ S1 and β ∈ S3. If P ∈ Q1, we use Maple, similarly to (5.2), to see that, for α ∈ S1 and β ∈ S3, π_{1,α,β}(P) does not lie on ⊥_{1,5}(P). Thus there can be no transitions other than the ones described earlier in the paper.

We explain briefly the Maple work that led to this conclusion. We follow the steps that led to (5.2), but using one of {β, γ} in S1 and one in S3. We obtain equations similar to (5.2). We plot them and find that there are no solutions satisfying −4 < y < 4, 0 < x < 4 − |y|.

References

[1] P. Agarwal, B. Aronov, J. O'Rourke, and C. Schevon, Star unfolding of a polytope with applications, SIAM J. Comput. 26 (1997) 1689–1713.
[2] J. O'Rourke and C. Vilcu, Cut locus realizations on convex polyhedra, CCCG (2021), arXiv:2102.11097.
[3] D. Recio-Mitter, Geodesic complexity of motion planning, J. Appl. Comput. Topol. 5 (2021) 141–178.

Department of Mathematics, Lehigh University, Bethlehem, PA 18015, USA
Email address: dmd1@lehigh.edu

Department of Mathematics, Lehigh University, Bethlehem, PA 18015, USA
Email address: maga23@lehigh.edu
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Each cut locus is a tree whose leaves are corner points1 of the polyhedron.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' In [1] and [2], methods were developed for determining the cut locus of a point, which we describe in Section 3 and utilize.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The cut locus of P varies continuously with P unless P is a corner point of the polyhedron, but its combinatorial structure can change abruptly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' We think of a cut locus as a graph with some vertices labeled by corner points of the polyhedron.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' We define an equivalence relation for these labeled graphs by edge-preserving vertex bijection that preserves labels, and denote by L the equivalence class of a cut locus L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' In this paper, we consider the cut loci for a cube and find a complete decomposition of a face of a cube into subsets on which L is constant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The subsets are connected open sets, curves bounding these sets, and single points where the curves intersect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' These are accurately rendered in Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1 gives an expanded version of regions in the left quadrant of 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' We find that there are 193 subsets altogether, but Date: January 26, 2023.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Key words and phrases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' cut locus, ridge tree, geodesic, cube, star unfolding, Voronoi diagram.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 2000 Mathematics Subject Classification: 52B10, 53C22, 52C30.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 1We reserve the term vertex for vertices of the star unfolding and cut locus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='11366v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='MG] 26 Jan 2023 2 DONALD M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' DAVIS AND MANYI GUO of these there are 16 pairs which have the same L and so there are 177 distinct L on a face of a cube.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Decomposition of a face into subsets on which L is constant In Section 2, we give a precise statement of results, including equations of the curves bounding the regions, and the labeled graphs for a representative set of L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' In Section 3, we present some preliminary information and tools from [2] and [1] needed in our work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Section 5 gives a proof that the regions and isomorphism classes of their cut loci are as described.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' And in Section 6, we prove the completeness of this result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' In Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='2, we picture a cube and a typical cut locus on it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' This shows the numbering of the corner points of the cube that we will use throughout.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' We use the back face of that cube as the domain for our points P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE 3 Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' A cube with labeled corner points, and the cut locus for the middle point of an edge highlighted One motivation for this work was [3], which considered geodesic motion-planning rules on a cube.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Another was [1], which considered bounds for the number of equiv- alence classes of cut loci on a convex polyhedron.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Statement of results In this section, we state our main result, a complete classification of all isomorphism classes, L, of cut loci for the cube, and, for each, the set of points P for which the cut locus of P is in L, partitioning a face into 193 connected subsets with constant L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Proofs of all claims will appear in Section 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Because of the omnibus nature of our result, we do not organize it into “Theorems.” It suffices to consider points on one face of the cube.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' We show that a face of the cube is composed of 60 connected open sets on which L is constant, together with 48 curves which bound these regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Except for the boundary of the square and its diagonals, each of these curves is given by a 2-variable polynomial equation of degree 2 or 3 with integer coefficients.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Some of these curves have constant L, while others are divided into two or three adjacent portions, on each of which L is constant, yielding 96 curve portions with constant L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' There are 58 distinct L’s on the regions and 86 on the curves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' There are 37 points of intersection of these curves, giving 33 additional L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' We find it convenient to use 0 ≤ x ≤ 8 and −4 ≤ y ≤ 4 as the coordinates of P = (x, y) in our face.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1 depicts the 15 open regions in the quadrant Q1 = {(x, y) : 0 ≤ x ≤ 4, |y| ≤ 4 − x}.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' In Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1, the x-axis is stretched by a factor of nearly 5 in order to better display the regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1 depicts the whole 8 7 4 T 3 1 1 6 1 24 DONALD M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' DAVIS AND MANYI GUO square, illustrating how regions in the other three quadrants are copies of the regions in the quadrant Q1 rotated around the center of the square.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' We will explain how the L in the regions in these quadrants are obtained by permuting the corner numbers 1-8 in L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Regions in quadrant Q1 In Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='2, we present the L for points in regions A-I in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The L in the primed regions in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1 are obtained by applying the permutation τ = (1 4)(2 3)(5 8)(6 7) to the corner numbers in the L of the corresponding unprimed region D-I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Note that the graphs which appear in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='2 represent isomorphism classes of labeled graphs, and so whether an edge points to the left or right is irrelevant, as is the vertical orientation of the graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 4 3- G 2 D F 1- E H c A B 0 H\' E\' 1 2 D" G\' E- 4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='2 0 E0 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='8ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE 5 Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' L in regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 2 6 2 5 1 1 4 7 3 5 8 4 8 7 3 6 5 8 2 6 1 7 4 3 2 8 6 5 1 7 4 3 6 7 1 4 5 2 8 3 A B C D E 2 4 6 5 1 7 3 8 4 8 3 7 6 1 2 5 2 4 6 5 1 7 8 3 2 8 3 7 4 1 5 6 F G H I Each region R in the top quadrant in Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1 is obtained from the corresponding region R0 in quadrant Q1 by a clockwise rotation of π/2 around the center of the square.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The L for R is obtained from that of R0 by applying the permutation σ = (1 4 3 2)(5 8 7 6) to the corner numbers at vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Similarly, regions along the right edge are a π-rotation of R0 and have their L obtained using the permutation σ2 = (1 3)(2 4)(5 7)(6 8).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Finally, a clockwise rotation of 3π/2 applies σ3 = (1 2 3 4)(5 6 7 8) to the numbers at vertices of L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' One can check that, for the 15 regions R0 in Q1, the L for σiR0, 0 ≤ i ≤ 3, are distinct except that σ2+εLA = σεLA for ε = 0, 1, yielding 58 distinct L for the regions on the face, each L having six degree-3 vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The notation LA refers to the L of points in the region A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' There are five curves and their vertical reflections which bound pairs of regions in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' A single curve usually bounds more than one pair of regions.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Its L will be different for different pairs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' In every case, the L for the curve is obtained by collapsing to a point one segment of the L for each region which it bounds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' For each curve, we list in (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='3) the pairs of regions which it bounds, followed by its equation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Then in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='4, we present the L for the various curve portions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' For example, the first curve, which appears almost horizontal in Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1 but is actually an arc of a circle with large radius, bounds regions B and D, and then has a short 6 DONALD M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' DAVIS AND MANYI GUO portion bounding regions E and I, and its L for each of these portions is presented in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The intersection point of these two portions has a different L, which will be described, along with its coordinates, later in this section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' BD, EI x2 + y2 − 24y + 16 = 0 (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='3) DE, BI, CI′ y3 + (3x + 12)y2 + (x2 + 40x − 16)y + 3x3 − 44x2 + 304x − 192 = 0 EF y3 + (x − 12)y2 + (x2 + 8x − 16)y + x3 − 20x2 − 240x + 192 = 0 FG, HA, CH′ x3 − 4x2 + (y2 + 8y − 80)x − 4y2 + 64 = 0 GA, FH x3 − 12x2 + (y2 − 24y + 112)x + 4y2 − 64 = 0 Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' L on curves.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 8 4 5 3 7 1 6 2 8 4 2 3 7 1 5 6 BD EI DE 8 5 3 4 7 1 6 2 BI 8 5 3 7 4 1 6 2 CI′ 3 5 7 8 4 1 6 2 4 2 3 7 1 5 6 8 EF 4 1 2 8 3 7 5 6 FG 4 7 8 3 1 2 5 6 HA 3 1 2 7 8 4 5 6 CH′ GA 4 3 8 7 1 6 5 2 4 1 2 8 3 7 5 6 FH The vertical reflection of the curves is obtained by replacing y by −y in the equa- tions, and their L is obtained using the permutation τ = (1 4)(2 3)(5 8)(6 7), as before.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' For the other three quadrants, the equations can be modified in an obvi- ous way, and the L obtained using the same permutations as were used for regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' One can check that, for the 11 curve segments s in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='4 and for ε = 0, 1 and 0 ≤ i ≤ 3, the L for τ εσis are distinct except that τ εσ2+iLGA = τ εσiLHA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' This gives 88 − 8 distinct L’s for curve portions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' All of these L’s have five degree-3 vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' In addition, there are 6 more L’s, coming from the edges and half-diagonals of the square.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The entire left edge of our face has constant L, as does the half diagonal ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE 7 h connecting the center of the face with the upper left corner.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' These are shown in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' These are the first cases where a corner point does not appear at a leaf, but rather at a degree-2 vertex of the cut locus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Applying the permutations σ, σ2, and σ3 described earlier gives the L’s on the other edges and half diagonals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' However, σ2+εLh = σεLh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Combining with those described above yields 86 distinct L’s on portions of curves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Figure 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' L on left edge and upper-left half diagonal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 8 4 1 5 7 6 3 2 8 3 7 6 1 5 2 4 left edge half diagonal Not including the y-axis, Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1 has eight intersection points of curves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Three below the x-axis are obtained by vertical reflection, and their L is obtained using the usual permutation τ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' We list the other five, notating them by the regions abutting them, and include their coordinates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' BDEI (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='6413, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='7045) EFHCI (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='7085, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='7085) FGHA (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='8, 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='6) BII′C (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='6989, 0) CHH′A (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='7757, 0) More precisely, the 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='7085 is 6 − 2 √ 7, while the 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='7045, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='6989, and 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='7757 are roots of the polynomials 37y4 − 816y3 + 304y2 − 3456y + 2560, 3x3 − 44x2 + 304x − 192, and x3 − 4x2 − 80x + 64, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The L for the five vertices are shown in Figure 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The BD curve intersects the y-axis at (0, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='685), but this point does not give a new L, since L is constant on the y-axis (for |y| < 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 8 DONALD M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' DAVIS AND MANYI GUO Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' L for intersection points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 8 4 5 3 7 1 6 2 BDEI 4 2 7 1 5 6 8 3 EFHCI 4 3 1 2 8 7 5 6 FGHA 3 4 1 2 7 8 5 6 CHH′A 8 5 7 4 1 6 2 3 BII′C Including τL for the first three L in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='6 and applying σi, 0 ≤ i ≤ 3 to all gives 32 − 4 distinct L, as τ εσ2+iLFGHA = τ εσiLFGHA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Finally, there are vertices at the center of the face and at each corner.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The L for the center and the top-left corner are presented in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Those for the other corners are obtained using the usual permutations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' L for special points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' 8 4 7 3 6 2 1 5 center 4 3 7 1 5 6 2 corner 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Background In this section we explain how the method for finding cut loci of convex polyhedra developed in [1] and [2] applies to a cube.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' This involves star unfolding and Voronoi diagrams.' 
We consider the cube with corner points numbered as in Figure 1.2, and let P be a point on the back (5678) face. In a planar model M of all faces of the cube except the front (1234) face, choose a shortest path connecting P to each corner point. These are called cuts. See Figure 3.1.

Figure 3.1. Example of cuts with respect to P. [Figure: the planar model with faces labeled by corner points 1-8 and the point P.]

These decompose M as the union of eight polygons, with P at a vertex of each, and edges 12, 23, 34, 41 and 15, 26, 37, and 48 at far ends of the polygons. The star unfolding of P is obtained by first gluing to the 1234 square the polygons with far edges 12, 23, 34, and 41. This will expose new edges 15, 26, 37, and 48, and we then glue the other four polygons to the corresponding edges. See Figure 3.2. This yields a polygon with eight vertices corresponding to corner points of the cube, and eight corresponding to occurrences of the point P, which we number as in Figure 3.2. This is the star unfolding, S, of the point P.
Figure 3.2. A star unfolding S. [Figure: the unfolded polygon with cube corner points 1-8 and the eight occurrences P1-P8 of P.]

Recall that our coordinates for the 5678 face of the cube are 0 ≤ x ≤ 8 and −4 ≤ y ≤ 4. We will initially consider points P in the quadrant Q1 given by −4 ≤ y ≤ 4 and 0 ≤ x ≤ 4 − |y|. Points in other quadrants will be considered later by rotating the cube. We use (v, w) as the coordinate system for the plane containing S, with (0, 0) at the midpoint of segment 2-3 in Figure 3.2, and sides of the two squares having length 8. The coordinates of the points labeled 1-8 are, respectively, (−8, 4), (0, 4), (0, −4), (−8, −4), (−8, 12), (8, 4), (8, −4), and (−8, −12). The coordinates (vα, wα) of the points Pα are as in (3.3).

    P1 = (−16 − x, −y),      P5 = (16 − x, −y),
    P2 = (−12 − y, 12 + x),  P6 = (12 − y, −12 + x),       (3.3)
    P3 = (−8 + x, 16 + y),   P7 = (−8 + x, −16 + y),
    P4 = (12 + y, 12 − x),   P8 = (−12 + y, −12 − x).

For each point Pα, 1 ≤ α ≤ 8, its Voronoi cell Cα is the set of points Q in S such that d(Q, Pα) ≤ d(Q, Pβ) for 1 ≤ β ≤ 8. The points of S which lie in more than one Cα comprise the cut locus LP of P.
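For concreteness, here is a small Python transcription (ours, not from the paper) of the coordinates (3.3), returning the eight points Pα for a given P = (x, y); the name star_points is our own.

    def star_points(x, y):
        # coordinates (v_alpha, w_alpha) of P_1, ..., P_8 from (3.3)
        return {
            1: (-16 - x, -y),      5: (16 - x, -y),
            2: (-12 - y, 12 + x),  6: (12 - y, -12 + x),
            3: (-8 + x, 16 + y),   7: (-8 + x, -16 + y),
            4: (12 + y, 12 - x),   8: (-12 + y, -12 - x),
        }

    # Example: P = (1.5, 0.5), the point used in Section 4.
    print(star_points(1.5, 0.5)[3])   # (-6.5, 16.5)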
In Figure 3.4, we show the Voronoi cells and cut locus of the point P in Figure 3.1.

Figure 3.4. Voronoi cells and cut locus of P. [Figure: the star unfolding with corner points 1-8, the points P1-P8, and the cut locus.]

All segments in the cut locus are portions of perpendicular bisectors ⊥α,β of the segments joining Pα and Pβ. One needs to consider how various ⊥α,γ intersect to decide when a portion of ⊥α,β is closer to Pα and Pβ than to other Pγ's. An example of this is discussed in Section 4.

4. Determination of a cut locus

We used Maple to help us find the cut locus for many points in quadrant Q1. We illustrate here with the top half of the cut locus of the point P = (x, y) = (1.5, 0.5). Substituting these values into the equations (3.3), we obtain the coordinates of the points Pα = (vα, wα) for 1 ≤ α ≤ 8. The equation of the perpendicular bisector, ⊥α,β, of the segment connecting points Pα and Pβ is

    w = (wα + wβ)/2 + ((vα − vβ)/(wβ − wα)) · (v − (vα + vβ)/2)   if {α, β} ≠ {1, 5},
    w = −x                                                        if {α, β} = {1, 5}.      (4.1)

Maple plots a selected batch of these lines ⊥α,β in a specified grid. The grid in Figure 4.2 is [−3.5, 0.5] × [0, 3].
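As an illustration of (4.1), here is our own Python sketch (not the authors' code) of the generic case; the {1, 5} case, where wα = wβ, must be handled separately as the vertical line v = −x.

    def bisector_w(va, wa, vb, wb, v):
        # formula (4.1) for {alpha, beta} != {1, 5}: the w-coordinate of the
        # perpendicular bisector of the segment from (va, wa) to (vb, wb) at abscissa v
        return (wa + wb) / 2 + (va - vb) / (wb - wa) * (v - (va + vb) / 2)

    # Example with P = (1.5, 0.5): P2 = (-12.5, 13.5) and P3 = (-6.5, 16.5).
    print(bisector_w(-12.5, 13.5, -6.5, 16.5, 0.0))   # -4.0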
Here we have included just those relevant for the top half of the cut locus, which appears in red. Other lines such as ⊥2,4 and ⊥2,5 would usually be considered for possible relevance. Trying to do this sort of analysis for the top and bottom halves of the cut locus together leads to an unwieldy collection of perpendicular bisectors. In Section 6, we show that it suffices to consider the top and bottom parts separately. When crucial intersection points are very close together, we can change the grid to effectively zoom in.

Figure 4.2. Finding a cut locus. [Figure: the perpendicular bisectors ⊥α,β, labeled on each side by the index of the nearer point, with the top half of the cut locus in red.]

Points equidistant from Pα and Pβ lie on ⊥α,β. In Figure 4.2, the line ⊥α,β is annotated with α on one side and β on the other, indicating the side closer to Pα or Pβ. The Voronoi cell for a point Pα is bounded by portions of lines ⊥α,β for various β, with α on the cell side of each ⊥α,β. For example, the Voronoi cell for P3 is bounded by portions of ⊥3,2, ⊥3,1, ⊥3,5, and ⊥3,4, reading from left to right in Figure 4.2. Although we use the various ⊥α,β to determine the cut loci, the eventual description of the cut locus is in terms of the corner points of the cube at certain vertices of the cut locus. As seen in Figure 3.2, the corner points on lines ⊥1,2, ⊥2,3, ⊥3,4, and ⊥4,5 are 1, 5, 2, and 6, respectively, and so the top half of the cut locus of the point P = (1.5, 0.5) is as depicted in Figure 4.3.
Figure 4.3. Top half of a cut locus. [Figure: a tree with leaves labeled 1, 5, 2, 6.]

5. Proofs

In this section, we show how the regions and curves and their cut loci are obtained. The coordinate systems are as described in Section 3. Let [8] = {1, 2, 3, 4, 5, 6, 7, 8}. For P = (x, y) ∈ Q1 and α ∈ [8], let Pα be the point in the star unfolding described earlier. Its coordinates (vα, wα) are linear expressions, (3.3), in x and y. In Figure 3.2, we depict a typical star unfolding. The vertices P1, ..., P8 are the focal points for the Voronoi cells, while the vertices with numbered labels correspond to the corner points of the cube.
For α, β ∈ [8], let ⊥α,β (P) denote the perpendicular bisector of the segment connecting Pα and Pβ. Its equation is (4.1). Note that ⊥α,α+1 has as its extreme point in the star unfolding the corner point 1, 5, 2, 6, 7, 3, 8, and 4, for α = 1, ..., 8. Although our results about L are described in terms of the corner points, our work is done in terms of the α. For S = {α, β, γ} ⊂ [8], let πS(P) denote the intersection of ⊥α,β (P) and ⊥β,γ (P) (and ⊥α,γ (P), as πS(P) is the center of the circle passing through Pα, Pβ, and Pγ). Let LP denote the cut locus of P. It is formed from portions of various ⊥α,β (P) which are closer to Pα and Pβ than to any other Pγ. The degree-3 vertices of LP are certain πS(P). From now on, we will usually omit the (P) and the set symbols in subscripts.
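Since πS(P) is the circumcenter of Pα, Pβ, Pγ, it can be computed by solving two linear equidistance conditions; the function below is our own illustrative sketch, not the paper's code.

    import numpy as np

    def pi_S(pa, pb, pc):
        # circumcenter of the three points: solve |Q-Pa|^2 = |Q-Pb|^2 = |Q-Pc|^2,
        # which is linear in Q = (v, w)
        (va, wa), (vb, wb), (vc, wc) = pa, pb, pc
        A = np.array([[2 * (vb - va), 2 * (wb - wa)],
                      [2 * (vc - va), 2 * (wc - wa)]])
        rhs = np.array([vb**2 + wb**2 - va**2 - wa**2,
                        vc**2 + wc**2 - va**2 - wa**2])
        return np.linalg.solve(A, rhs)

    # Example: pi_{3,4,5} for P = (1.5, 0.5), using P3, P4, P5 from (3.3).
    print(pi_S((-6.5, 16.5), (12.5, 10.5), (14.5, -0.5)))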
A transition from one isomorphism class of LP to another as P varies will occur when πα,β,γ passes through another ⊥β,δ. This is illustrated in Figure 5.1. The references to t and t0 will be used in Section 6. In the left side of the figure, πα,β,γ is part of LP since it is closer to Pα, Pβ, and Pγ than to any other P-point, but as P changes and πα,β,γ moves across ⊥β,δ, it is now closer to Pδ, and so is not part of LP.

Figure 5.1. Transition. [Figure: three panels, t < t0, t = t0, and t > t0, showing πα,β,γ crossing ⊥β,δ.]

We will show in Section 6 that this type of transition is the only way to change from one L to another. The v-coordinate of πα,β,γ is found by equating the right hand side of (4.1) for (α, β) and (β, γ), using the formulas for vα, wα, etc., in terms of x and y given in (3.3). This yields a formula for v = vα,β,γ in terms of x and y. We assume first that {α, β, γ} does not contain both 1 and 5. The relationship between x and y such that πα,β,γ and πα,β,δ coincide (and hence a transition might occur) is the equation vα,β,γ = vα,β,δ. This yields a fourth-degree equation. We let Maple do the work. We find the equation for (α, β, γ, δ) = (1, 2, 3, 4) and for (2, 3, 4, 5) as follows.
    v[1]:=-16-x: w[1]:=-y:
    v[2]:=-12-y: w[2]:=12+x:
    v[3]:=-8+x:  w[3]:=16+y:
    v[4]:=12+y:  w[4]:=12-x:
    v[5]:=16-x:  w[5]:=-y:
    A:=solve((w[a]+w[b])/2+(v[a]-v[b])/(w[b]-w[a])*(v-(v[a]+v[b])/2)
            =(w[a]+w[c])/2+(v[a]-v[c])/(w[c]-w[a])*(v-(v[a]+v[c])/2), v):
    B:=solve((w[a]+w[b])/2+(v[a]-v[b])/(w[b]-w[a])*(v-(v[a]+v[b])/2)
            =(w[a]+w[d])/2+(v[a]-v[d])/(w[d]-w[a])*(v-(v[a]+v[d])/2), v):
    simplify(numer(A)*denom(B)-numer(B)*denom(A))

Note that the expressions for A and B will be rational expressions, and so the last line gives a polynomial which equals 0. For (a,b,c,d) = (1,2,3,4), this yields

    4(y^3 + (12 − x)y^2 + (x^2 + 8x − 16)y − x^3 + 20x^2 + 240x − 192)(4 + y + x) (= 0)

and for (a,b,c,d) = (2,3,4,5), it yields

    (y^3 + (3x + 12)y^2 + (x^2 + 40x − 16)y + 3x^3 − 44x^2 + 304x − 192)(4 + y − x) (= 0).

The cubic factor of the 2345 curve is the second of the five equations listed in (2.3). The 1234 equation here gives the vertical reflection of the EF equation in (2.3). For {1, β, γ, 5}, since ⊥1,5 is the line v = −x, we do A above for 1, b, and c, omit B, and simplify (numer(A) + x·denom(A)). This yields (beginning a practice of often omitting commas)

    1235   x^3 − 4x^2 + (y^2 + 8y − 80)x − 4y^2 + 64 (= 0)
    1245   x(x^2 + y^2 + 24y + 16) (= 0)                                  (5.2)
    1345   x^3 − 12x^2 + (y^2 + 24y + 112)x + 4y^2 − 64 (= 0).

For each of these five cases, if 2, 3, and 4 are replaced by 8, 7, and 6, respectively, the equation is obtained by replacing y by −y. Altogether we have ten equations. Compare with equations (2.3).
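A rough sympy analogue of this Maple session (our own sketch, with our own names; we have not matched the printed factorizations symbol-for-symbol) would eliminate v between the two bisector equations and factor the resulting polynomial in x and y.

    import sympy as sp

    x, y, v = sp.symbols('x y v')

    # coordinates (3.3) for indices 1..5, as in the Maple session above
    V = {1: -16 - x, 2: -12 - y, 3: -8 + x, 4: 12 + y, 5: 16 - x}
    W = {1: -y,      2: 12 + x,  3: 16 + y, 4: 12 - x, 5: -y}

    def bis(a, b):
        # right-hand side of (4.1); not valid for the pair {1, 5}, where w_a = w_b
        return (W[a] + W[b]) / 2 + (V[a] - V[b]) / (W[b] - W[a]) * (v - (V[a] + V[b]) / 2)

    def transition_poly(a, b, c, d):
        A = sp.solve(sp.Eq(bis(a, b), bis(a, c)), v)[0]   # v-coordinate of pi_{a,b,c}
        B = sp.solve(sp.Eq(bis(a, b), bis(a, d)), v)[0]   # v-coordinate of pi_{a,b,d}
        return sp.factor(sp.together(A - B).as_numer_denom()[0])

    # Expect a polynomial containing the 2345 cubic displayed above, up to constant
    # factors and factors that cancel against the denominator.
    print(transition_poly(2, 3, 4, 5))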
We describe LP for P in the top half of Q1 by the sets S for which πS is a degree-3 vertex of LP. Later in this section we will explain how we translate this description to the description involving corner points of the cube, which appeared in Section 2. For example, the case P = (1.5, 0.5) considered in Section 4 has πS for S = 123, 135, and 345 in its top half. Maple plotting of perpendicular bisectors shows that the bottom half of this LP is essentially a flip of the top half, so has πS for S = 178, 157, and 567.

In Section 6, we show that the only possible transitions from one L to another are of the type illustrated in Figure 5.1, where an αβγδ intersection bounds one region whose L has πα,β,γ and πα,γ,δ vertices and another with πα,β,δ and πβ,γ,δ. The point is that the 4-set (by which we mean a set with 4 elements) defining the bounding curve must have two 3-subsets in each of the regions on either side of it. So, for example, the 1568 curve could not bound a region containing L(1.5,0.5), because there are not two of the six 3-sets S for L(1.5,0.5) listed in the previous paragraph which are contained in {1, 5, 6, 8}.
Of the ten equations determined above, all except the ones corresponding to 1568 and 1245 intersect the top half of Q1 in a curve which we denote as x = θαβγδ(y) for 0 ≤ y ≤ 4. Each y has three x values as solutions, but we neglect those that are complex or outside the region 0 ≤ x ≤ 4 − y. The equation for 1245 does not intersect this region, and the one for 1568 does so only for 0.685 ≤ y ≤ 1.07. Maple shows that, for 1.6 < y < 4,

    θ1345(y) < θ2345(y) < θ1578(y) < θ1678(y) < θ5678(y) < θ1234(y) < θ1235(y) < θ1567(y),

and that for 0 ≤ y ≤ 4 all eight of these curves satisfy 0 ≤ θαβγδ(y) ≤ 0.83.
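Such comparisons can be spot-checked numerically. The sketch below (ours, not the paper's code) substitutes a value of y into two of the displayed equations, collected in powers of x by us, and picks out the admissible root in 0 ≤ x ≤ 4 − y.

    import numpy as np

    def admissible_root(coeffs, y):
        # real roots of the cubic in x lying in the strip 0 <= x <= 4 - y
        r = np.roots(coeffs)
        r = r.real[np.abs(r.imag) < 1e-9]
        r = r[(r >= 0) & (r <= 4 - y)]
        return r.min() if r.size else None

    def theta_2345(y):   # the 2345 cubic above, collected in powers of x
        return admissible_root([3, y - 44, 3*y**2 + 40*y + 304,
                                y**3 + 12*y**2 - 16*y - 192], y)

    def theta_1235(y):   # the 1235 equation from (5.2)
        return admissible_root([1, -4, y**2 + 8*y - 80, -4*y**2 + 64], y)

    print(theta_2345(2.0), theta_1235(2.0))   # roughly 0.44 and 0.77, consistent with the ordering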
For P in quadrant Q1, LP has the type of the case P = (1.5, 0.5) considered above, with degree-3 vertices corresponding to S = 123, 135, 345, 178, 157, and 567, until a transition occurs. This will define region A in Figure 2.1.

Now let 1.6 < y < 4. Since there are no αβγδ intersections of the eight types in the above string of inequalities in the region R = {(x, y) : 0 ≤ y ≤ 4, 0.83 ≤ x < 4 − y}, and, as noted above, a 1568 intersection cannot affect L(1.5,0.5), we conclude that for all (x, y) in R, L(x,y) = L(1.5,0.5), with degree-3 vertices 123, 135, 345, 178, 157, and 567. For this, we also need an observation in Section 6 that no other αβγδ can have an effect. As we move from the right, when the point P = (θ1567(y), y) is encountered, there is a transition from 157 and 567 to 156 and 167. This is region G, with 123, 135, 345, 178, 156, and 167. Next we encounter P = (θ1235(y), y), and this causes a transition to 125, 235, 345, 178, 156, and 167. This is region F. The next two potential transitions at 1234 and 5678 do not effect a change, because neither of these 4-sets contains two 3-subsets which are vertices of region-F cut loci. Next at P = (θ1678(y), y) we have a transition, changing 178 and 167, leading to region E described by 125, 235, 345, 168, 156, and 678. The next potential transition, 1578, does not effect a change, but then 2345 does, to 125, 234, 245, 168, 156, and 678 in region D. Finally, 1345 does not effect a change because it does not have two 3-sets of region D.

Before we discuss other ranges of values of y, we point out that when a curve is crossed, it gives a degree-4 vertex of the cut locus, as shown in the middle part of Figure 5.1. Thus, for points P on the curve GA separating regions G and A, LP has vertices abutting regions 123, 135, 345, 178, and 1567 of the star unfolding, and similarly for points on the other curves crossed in the above analysis. We also note that θ1567(1.6) = 0.8 = θ1235(1.6).
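This coincidence is easy to verify (our sketch): θ1235 comes from the 1235 equation in (5.2), and θ1567 from the 1345 equation with y replaced by −y; both vanish at (x, y) = (0.8, 1.6).

    x, y = 0.8, 1.6
    p_1235 = x**3 - 4*x**2 + (y**2 + 8*y - 80)*x - 4*y**2 + 64
    p_1567 = x**3 - 12*x**2 + (y**2 - 24*y + 112)*x + 4*y**2 - 64   # 1345 with y -> -y
    print(p_1235, p_1567)   # both numerically 0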
The same procedure is followed for other intervals of values of y, arranging the 4-sets S according to the order of θS(y), and then working from right to left to see whether the transitions are effective, i.e., whether S contains two 3-sets which are vertices of the region under consideration. For 0.7085 < y < 1.6, the only change from the above order which causes a different transition is that θ1235(y) is now greater than θ1567(y), so the 1235 change takes place first, leading to region H with vertices 125, 235, 345, 157, 567, and 178.

The most interesting point is (6 − 2√7, 6 − 2√7) ≈ (0.7085, 0.7085), which lies on all of θ1567, θ1678, θ1568, θ1578, and θ5678.³ These five curves reverse their order at y = 6 − 2√7. For 6 − 2√7 < y < 0.715,

    θ2345(y) < θ1578(y) < θ1678(y) < θ5678(y) < θ1567(y) < θ1568(y) < θ1234(y) < θ1235(y),

which has the transitions described in the preceding paragraph, but for 0.7045 < y < 6 − 2√7,

    θ2345(y) < θ1568(y) < θ1567(y) < θ5678(y) < θ1678(y) < θ1578(y) < θ1234(y) < θ1235(y),

which has a different order of transitions.
Let 0.7045 < y < 6 − 2√7. After the 1235 change, the next one is 1578, leading to region C with vertices 125, 235, 345, 158, 567, and 578. The next transition is due to 5678, leading to region I with vertices 125, 235, 345, 158, 568, and 678. The next transition is due to 1568, which brings us into region E, with vertices 125, 235, 345, 168, 156, and 678, which were already seen when considering larger values of y. Finally, a 2345 transition brings us into region D as above.

The 2345 and 1568 curves intersect at y ≈ 0.7045, so for y < 0.7045, the 2345 transition precedes the 1568 transition, leading to region B with vertices 125, 234, 245, 158, 568, and 678. For y > 0.685, there will be a 1568 transition into region D, but for y < 0.685, there is no 1568 transition since θ1568(y) < 0 if y < 0.685.

³To see this remarkable fact, recall that these five curves are obtained by replacing y by −y in the polynomials in (5.2) and the paragraph preceding it. After doing this, let y = x. Each of the resulting polynomials equals x^2 − 12x + 8 times a linear factor.
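This factorization is easy to confirm with sympy; the check below (ours) carries it out for the 1235 equation from (5.2), and the footnote asserts the same pattern for the other four polynomials.

    import sympy as sp

    x, y = sp.symbols('x y')

    # 1235 equation from (5.2); replace y by -y, then set y = x, then factor
    p_1235 = x**3 - 4*x**2 + (y**2 + 8*y - 80)*x - 4*y**2 + 64
    print(sp.factor(p_1235.subs(y, -y).subs(y, x)))   # a multiple of x**2 - 12*x + 8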
This completes the description of the regions of the top half of quadrant Q1 with constant L, described in terms of the Voronoi cells. Now we translate this description into one which has the cube's corner numbers at the leaves, which is the description given in Section 2, and is needed for giving permuted descriptions in other quadrants. In Figure 5.3, we show how the top half of the cut loci appear in terms of Voronoi cells, and list the regions in Figure 2.1 in which they appear. Each edge leading to a leaf is a perpendicular bisector separating Voronoi cells i and i + 1 for some i mod 8. For i = 1, 2, 3, 4, the corner point at the end of this bisector is 1, 5, 2, 6, respectively, as can be seen in Figure 3.2. The reader can check that this labeled diagram is consistent with the L in Figure 2.2.

Figure 5.3. Top half of cut loci. [Figure: three labeled trees: regions A, G (leaves 1 5 3 2 4; vertices 123, 135, 345); regions C, E, F, H, I (leaves 1 2 3 4 5; vertices 125, 235, 345); regions B, D (leaves 1 2 5 3 4; vertices 125, 234, 245).]

In Figure 5.4, we do the same thing for the bottom half of cut loci in the top half of Q1. The corner numbers at the ends of segments bounding Voronoi cells 5 and 6, 6 and 7, 7 and 8, and 8 and 1 are 7, 3, 8, and 4, respectively.
Figure 5.4. Bottom half of cut loci. [Figure: five labeled trees: regions A, H (leaves 7 6 8 1 5; vertices 157, 567, 178); region C (leaves 1 5 6 7 8; 158, 567, 578); regions B, I (leaves 7 6 8 5 1; 158, 568, 678); regions D, E (leaves 7 6 5 1 8; 156, 168, 678); regions F, G (leaves 8 7 1 5 6; 156, 167, 178).]

A similar discussion could be made for the L associated to the curves. But it is easier and more insightful to note how the L for a curve bounding two regions is obtained from that of each of the two regions by collapsing a segment in which the two regions differ. For example, the L for the BD curve in Figure 2.4 is obtained from those in region B or D in Figure 2.2 by collapsing the segment connecting the edges leading to corner points 4 and 7. Similarly, the L for points of intersection of two curves is obtained by collapsing a segment in the L of each. For example, the L for point BDEI in Figure 2.6 is obtained from those of curves BD and EI in Figure 2.4 by collapsing in each the highest vertical interval.

The L's in Figures 2.5 and 2.7 are different from those seen previously in that they have a corner point labeling a degree-2 vertex. In these cases, the choice of cuts is not unique, but, of course, the cut locus does not depend on the choice. We comment briefly on the L in these cases.
If P is on the left edge of the cube, the L is as seen in Figure 1.2. If P is at a corner point of the cube, the cut locus consists of segments from the corner point opposite P to each of the other corner points. If P is at the center of a face F, the cut locus consists of the diagonals of the opposite face F^op and the four edges of the cube connecting F and F^op. If P = (x, 4 − x) with 0 < x < 4 is on the half-diagonal, then ⊥4,5 is the line w = 4, which intersects the point in the star unfolding corresponding to corner point 2. Then the short segment connecting the point π3,4,5 in Figure 3.4 with the point labeled 2 will have collapsed to a point. In the A diagram in Figure 2.2, this is the collapse of the vertical segment from the point labeled 2. This can be seen in terms of the Voronoi cells in the A-part of Figure 5.3. A similar thing happens to the vertical segment leading to the point labeled 8, as the equation of ⊥7,8 is v = −8.

In Section 2, we discussed how a permutation τ (resp. σ) applied to corner points yields L in the vertical flip (resp. 90-degree clockwise rotation) of a region or curve. Here we give a brief explanation of the reason for that.
Such a motion applied to a point P in the region has the same effect on geodesics from P, and hence on LP. Referring to Figure 3.1, we see that, for example, the corner point 8 in LP will be replaced by 5 (resp. 7), which expands to the asserted permutations.

6. No other transitions

In this section, we present a proof that there are no regions other than those described earlier.

Suppose LP0 ≠ LP1. Let P(t) = (1 − t)P0 + tP1, and, for any 3-subset S of [8], let πS(t) = πS(P(t)), a path in the vw-plane. For each S such that πS(0) is a vertex of LP0, let

    t0(S) = sup{t ∈ [0, 1] : πS(t′) is a vertex of LP(t′) for all t′ < t},

and let t0 = min{t0(S) : πS(0) is a vertex of LP0}. Finally, let S = {α, β, γ} satisfy t0(S) = t0. The first transition in moving from P0 to P1 will involve πS(P(t0)).

Proposition 6.1. There exists δ ∈ [8] − S and a decomposition of S as {β} ∪ {α, γ} such that π{α,γ,δ}(0) is a vertex of LP0, and πS(t0) = π{α,γ,δ}(t0) is a common vertex of LP(t0).

Proof. There exists ε > 0 and δ ∈ [8] − S such that for t in the interval (t0, t0 + ε), πS(t) is closer to Pδ than it is to Pα, Pβ, and Pγ.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' The path πS crosses ⊥δ,η (P(t0)) for some η ∈ [8], and η must equal α, β, or γ, since for t in some interval (t0 − ε′, t0), πS(t) is closer to Pα, Pβ, and Pγ than it is to any other Pη.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Without loss of generality, say η = β.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Then πS(t0) intersects ⊥β,δ (P(t0)), and so all six perpendicular bisectors from {α, β, γ, δ} intersect in LP(t0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' By minimality of t0, since π{α,γ,δ}(t0) ∈ LP(t0), we conclude that π{α,γ,δ}(0) ∈ LP0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' See Figure 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='1 for a depiction of this transition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Theorem 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' There are no transitions except those claimed earlier in the manu- script.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Let S1 = {2, 3, 4}, S2 = {1, 5}, and S3 = {6, 7, 8}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' Recall that all of our asserted regions in Q1 have L with three vertices from S1 ∪S2 and three from S2 ∪S3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' If LP0 ̸= LP1 with LP0 in one of our regions, and t0 is as above, so that we are considering the first transition in moving from P0 to P1, then the set {α, β, γ, δ} involved in the transition must either contain S2 or else equal one of {1, 2, 3, 4}, {2, 3, 4, 5}, {1, 6, 7, 8}, or {5, 6, 7, 8}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' This is true since sets with elements of type S1S2S3S3, S1S1S2S3, S1S1S3S3, S1S1S1S3, or S1S3S3S3 do not contain two 3-subsets of the type of the vertices of LP0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/xNFIT4oBgHgl3EQfzyta/content/2301.11366v1.pdf'} +page_content=' In our earlier determination of the regions in Q1, we considered the four specific sets listed above (containing a single 1 or 5), and also all ISOMORPHISM CLASSES OF CUT LOCI FOR A CUBE 21 sets with elements of type S1S1S2S2 and S2S2S3S3.' 
It remains to consider {1, 5, α, β} with α ∈ S1 and β ∈ S3. If P ∈ Q1, we use Maple, similarly to (5.2), to see that, for α ∈ S1 and β ∈ S3, π{1,α,β}(P) does not lie on ⊥1,5(P). Thus there can be no transitions other than the ones described earlier in the paper. We explain briefly the Maple work that led to this conclusion. We follow the steps that led to (5.2), but using one of {β, γ} in S1 and one in S3. We obtain equations similar to (5.2). We plot them and find that there are no solutions satisfying −4 < y < 4, 0 < x < 4 − |y|.

Department of Mathematics, Lehigh University, Bethlehem, PA 18015, USA. Email address: dmd1@lehigh.edu
Department of Mathematics, Lehigh University, Bethlehem, PA 18015, USA. Email address: maga23@lehigh.edu

diff --git a/xNFQT4oBgHgl3EQfADUm/content/2301.13221v1.pdf b/xNFQT4oBgHgl3EQfADUm/content/2301.13221v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..10e993f39fd3fb094e87f75a2f4faf3790b6299f --- /dev/null +++ b/xNFQT4oBgHgl3EQfADUm/content/2301.13221v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26520776eff3c85cf621e4a66b1113c9eb1fd3ce0ed7930f73cd3242cd00af97 +size 1820194 diff --git a/xNFQT4oBgHgl3EQfADUm/vector_store/index.pkl b/xNFQT4oBgHgl3EQfADUm/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..28dfe61de85df6f5670371abda5a03cbcc8cbeae --- /dev/null +++ b/xNFQT4oBgHgl3EQfADUm/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98e251aa0d61ba7c874236592c81ad14fcea5f94f9d0e1559afe1d1042d625e1 +size 157281 diff --git a/ytE2T4oBgHgl3EQfMQam/content/tmp_files/2301.03723v1.pdf.txt b/ytE2T4oBgHgl3EQfMQam/content/tmp_files/2301.03723v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..de1f832970d1adaa089e3caa70a4dfe0404ceced --- /dev/null +++ b/ytE2T4oBgHgl3EQfMQam/content/tmp_files/2301.03723v1.pdf.txt @@ -0,0 +1,625 @@

Analysis and Empirical Validation of Visible Light Path Loss Model for Vehicular Sensing and Communication

Hisham Abuella, School of Electrical and Computer Engineering, Oklahoma State University, Stillwater, OK, USA (hisham.abuella@okstate.edu); Md Zobaer Islam, School of Electrical and Computer Engineering, Oklahoma State University, Stillwater, OK, USA (zobaer.islam@okstate.edu); Russ Messenger, School of Electrical and Computer Engineering, Oklahoma State University, Stillwater, OK, USA (russ.messenger@okstate.edu); John F. O'Hara, School of Electrical and Computer Engineering, Oklahoma State University, Stillwater, OK, USA (oharaj@okstate.edu); Sabit Ekin, Department of Engineering Technology and Industrial Distribution, Texas A&M University, College Station, TX, USA (sabitekin@tamu.edu)

Abstract—Advancements in lighting systems and photodetectors provide opportunities to develop viable alternatives to conventional communication and sensing technologies, especially in the vehicular industry. Most studies that propose visible light for communication or sensing adopt the Lambertian propagation (path loss) model. This model requires knowledge of multiple parameters to calculate the path loss, such as the photodetector area, the incidence angle, and the distance between transmitter and receiver. In this letter, a simplified, mathematically more tractable path loss model is proposed for vehicular sensing and communication systems that use visible light technology. Field measurement campaigns are conducted to validate the performance and limits of the developed path loss model, and the proposed model is used to fit the data collected over different ranges of incident angles and distances. This model can be used when designing visible light-based communication and sensing systems to avoid the complexity of the Lambertian path loss model, particularly when the incident angle between transmitter and receiver is relatively small.

Index Terms—Visible Light Communication (VLC), Visible Light Sensing (VLS), Vehicle-to-Vehicle Communication (V2V), Channel Modeling, Path Loss, Vehicular Technology, Lambertian Path Loss Model.

This work was supported by the U.S. Department of Transportation through the Transportation Consortium of South-Central States (Tran-SET) under Grant No. 18ITSOKS01. This work has been submitted to the IEEE for possible publication. Copyright may be transferred without notice, after which this version may no longer be accessible.

I. INTRODUCTION

Wireless communication and sensing systems have experienced many advancements, from the discovery of electromagnetic (EM) waves, to wireless telegraphs and radios, to modern smartphones, connected vehicles and Internet of Things (IoT) devices. These new technologies increasingly rely on wireless links with ever higher bandwidth and data consumption to accomplish their purpose. In some systems, such as in the vehicular industry, radio frequency (RF) technology can be inefficient due to interference, spectrum scarcity, health concerns and power limitations [1], [2]. In recent years, interest in using visible light systems to enable wireless connectivity in vehicle-to-vehicle (V2V) and vehicle-to-infrastructure (V2I) links, collectively referred to as V2X communication, has increased. This growing interest is due to advancements in light-emitting diode (LED) and photodetector (PD) technologies. Moreover, these systems are low-cost, highly efficient, and can support high data rates. One important requirement for visible light communication is to avoid human perception of flickering at the light source, an easily achievable task by employing intensity modulation faster than 200 Hz [3].

There are multiple studies discussing different implementations of visible light communication (VLC) in V2X applications.
Liu et al. introduced the idea of enabling vehicular VLC (V2LC) systems and studied the effect of multiple vehicles as well as visible light noise and interference [4]. They considered the reliability and latency requirements to examine the V2LC performance. Multiple studies have demonstrated VLC systems for outdoor vehicular scenarios. Cailean et al. presented a short-distance prototype to transmit data using Miller and Manchester coding [5]. Lourenco et al. discussed the challenges that outdoor VLC systems face and implemented a prototype that can achieve low data rates in the presence of high optical noise levels [6].

In most of the previously mentioned studies, understanding the path loss model and estimating the received light power at different positions (different distances) are critical for estimating the theoretical performance limits of VLC systems. Few researchers have studied and analyzed the visible light channel and its path loss behavior in real-world scenarios, which are important and critical aspects to consider. The Lambertian propagation (path loss) model has been adopted by most VLC and VLS (visible light sensing) studies [1], [2], [7], [8]. This propagation model works well in indoor scenarios; however, the same model may not work well for visible light communication and sensing in V2X applications. As a potential solution, some studies introduced empirical path loss models for outdoor vehicular VLC systems. Cui et al. studied an outdoor VLC link using LED traffic lights, where the link path loss model is analyzed both theoretically and experimentally [9]. Viriyasitavat et al. derived a realistic path loss model for VLC systems using an off-the-shelf scooter taillight; the proposed model accurately estimated the received power from the taillight up to 10 meters [10]. Turan et al. performed frequency-domain channel sounding and characterization for vehicular VLC in different scenarios [11]. A comparison between radio frequency and visible light propagation channels for vehicular communication was presented by Cheng et al. in [12]. Recently, there has been a shift toward simplifying the visible light path loss model for outdoor vehicular scenarios and verifying these models using simulations in ray tracing software, as done in [13]. Moreover, Elamassie et al. developed a path loss model for V2V links as a function of distance under different weather conditions and confirmed their models using ray tracing software [14]. In addition to ray tracing tools, Eso et al. performed an experimental investigation of the effects of fog on optical camera-based VLC. Memedi et al. investigated the impact of vehicle type and headlight characteristics on VLC performance [15]. However, in the aforementioned studies, the proposed models are experimentally tested with only a limited number of static points (2D or 3D) between the receiver and transmitter.

In this letter, a simplified visible light path loss model is proposed, analyzed and empirically validated for outdoor V2I communication and sensing applications. To verify the proposed model, data is collected from field measurements using off-the-shelf LED lights and a PD, using a new dynamic channel modeling approach.
This model can be used in different studies to decrease the analysis complexity (i.e., increase mathematical tractability) where the Lambertian propagation model would otherwise be assumed and the incident angle is small enough. Moreover, the limits of the developed path loss model are provided. In summary, the main contributions of this letter are as follows:
• Proposing and analyzing a mathematically more tractable and simplified visible light path loss model.
• Verifying the proposed path loss model by field measurements using a dynamic channel modeling approach.
• Discussing the limits of the proposed path loss model and of the dynamic channel modeling approach.
To the best of our knowledge, this is the first work to provide and verify a simplified visible light path loss model for V2X applications that takes the motion of the vehicle (dynamic channel modeling) into account.

The rest of the letter is organized as follows. In Section II, the system model and the experimental setting are provided. In Section III, the developed path loss model and its mathematical derivation are given. In Section IV, the field measurements and their comparison with the model are presented. Finally, the conclusions are discussed in Section V.

II. SYSTEM MODEL AND EXPERIMENTAL SETTING

The system model under consideration is shown in Fig. 1, where θ and w are the incidence angle and the lateral distance between the vehicle and the photodetector (PD), respectively. R and D are the varying longitudinal distance and the actual distance between the vehicle and the PD, respectively. For simplicity, the longitudinal distance R will be referred to as the range, and the actual distance D will be referred to as the distance between the PD and the vehicle for the rest of the paper.

Fig. 1. The system model that is used in the study.

The actual experimental setting used to collect the data is presented in Fig. 2. A Honda Civic 2008 with a LASFIT L1 LED headlamp [16] is used as the transmitter. A photodetector from Thorlabs (PDA100A) [17] is used as the receiver, and a Raspberry Pi Model 3B+ [18] is used as the processing module. A Pi-Plate (DAQC2plate) [19] is utilized as the analog-to-digital converter (ADC) in the proposed V2X system. All the parameters used in the experimental setting are stated in Table I.

Fig. 2. The experimental setting used for collecting data.

TABLE I. Parameters used in the data collection experimental setting:
Transmitter (Tx) LED: Honda Civic 2008 with a LASFIT (L1 9005) LED
Receiver (Rx) PD: Thorlabs PDA100A
Receiver detection area: 10 mm^2
Avg. Tx power (electrical): 20 W
Tx and Rx height: 60 cm
ADC (Pi-Plate DAQC2plate): 366 µV per bit, up to 12 V input range, 50 kSamples/s

A 50 Ω BNC terminator is added to the PDA100A, which, according to the datasheet of the photodetector [17], decreases reflections and improves the signal-to-noise ratio of the collected signal, which lies between 0 V and 5 V. The received light power is a function of this collected signal and can be converted according to the amplifier mode and the wavelength of the visible light used [17]. A 335-610 nm bandpass optical filter from Thorlabs is used to remove the effect of IR and lower wavelengths.
The equation used to convert the received signal to the received light power is as follows [17]:

P_{in} = \frac{2 V_{out}}{R_{PDA100A}(\lambda)\, G(G_{amp})},    (1)

where P_in is the received light power at the PD (watts), V_out is the output signal from the ADC (volts), R_PDA100A(λ) is the PDA100A responsivity at different wavelengths, which is 0.4 A/W at a 610 nm wavelength, and G(G_amp) is the transimpedance gain (V/A), which varies according to the chosen amplifier variable gain G_amp (from 0 dB to 70 dB). According to [17], (1) can be simplified as

[P_{in}]_{dBW} = 10 \log_{10}(V_{out}) - 0.5\, G_{amp} - 21.76,    (2)

where [P_in]_dBW is the received light power at the PD on the dBW scale. A Python script is used to collect the data samples and save them on the Raspberry Pi. Finally, offline data processing is done on a laptop using MATLAB scripts.

III. PROPOSED CHANNEL MODEL

The visible light channel has been studied intensively in the context of indoor communications [1], [2]. One of the most widely adopted line-of-sight (LOS) channel models is the Lambertian model [20]. The Lambertian model captures the effects of the different variables and parameters that make the visible light signal vary at the receiver end: the transmitter power, the distance between the light source and the PD, the optical detector size, the PD field of view (FOV, the region of space from which the detector can receive light), and the incident angle θ. The Lambertian model for the received signal power as a function of distance is given as follows:

P_r = \frac{(n+1) A_R P_t}{2\pi D^{\gamma}} \cos^{n}(\phi) \cos(\theta), \qquad \forall\, \theta < \phi_{1/2},    (3)

where P_t is the transmitter power in watts, P_r is the received signal power in watts, and A_R is the optical detector size. φ and θ are the angles of irradiance and incidence, respectively. The path loss exponent γ depends on the environmental conditions, such as the reflectiveness of materials, light conditions, etc.; typically, it lies between 1 and 6. In addition, φ_{1/2} is the semi-angle at half power of the LED (half of the FOV of the light source), and n is the order of the Lambertian model, given by

n = -\frac{\ln 2}{\ln(\cos \phi_{1/2})}.    (4)

In our case, the light source and the receiver move in the same direction and are at the same height; therefore,

\theta = \phi,    (5)

where 0 < θ < φ_{1/2}. Using (5), (3) can be simplified to

P_r = \frac{(n+1) A_R P_t}{2\pi D^{\gamma}} \cos^{n+1}(\theta).    (6)

Then, in order to express P_r(t) in terms of D(t), (6) is further simplified by defining a constant K as

K = \frac{(n+1) A_R P_t}{2\pi},    (7)

which leads to

P_r = K D^{-\gamma} \cos^{n+1}(\theta).    (8)

Taking 10 log_10(·) on both sides,

P_{r,dB} = K_{dB} - \gamma D_{dB} + 10 (n+1) \log_{10}(\cos\theta),    (9)

where P_{r,dB} = 10 log_10(P_r), K_dB = 10 log_10(K), and D_dB = 10 log_10(D). Since cos(θ) = R/D = √(D² − w²)/D = √(1 − w²/D²), we have

P_{r,dB} = K_{dB} - \gamma D_{dB} + 5 (n+1) \log_{10}\left(1 - \frac{w^2}{D^2}\right).    (10)

Finally, the expression can be divided into two cases:

P_{r,dB} = \begin{cases} K_{dB} - \gamma D_{dB}, & w^2/D^2 \ll 1 \\ K_{dB} - \gamma D_{dB} + G_{dB}, & \text{otherwise} \end{cases}    (11)

where G_dB = 5(n+1) log_10(1 − w²/D²). The first case (far-scenario) applies when D is large enough or when w is small; it can be called the log-linear simplified model, since P_{r,dB} = K_dB − γ D_dB is equivalent to P_r = K D^{−γ}. The second case (near-scenario) applies when D is small or when w is large compared to D. Notice that for constant values of K_dB and γ, the received power changes with the distance: the longer the distance between the transmitter (light source, e.g., the LED) and the receiver (PD), the lower the received power, and the correction factor G_dB is added in near-scenarios.
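To make the two branches of (11) concrete, the short sketch below is our own illustration, not code from the paper; the Lambertian order n, the lateral offset w and the fitted constants are assumed example values (K_dB and γ are taken to be close to the night-time fit reported later in Table II). It evaluates the full expression (10) and its log-linear far-scenario approximation:

    import math

    def received_power_db(D, K_dB, gamma, n, w, far_approx=False):
        """Received power in dB at actual distance D (m), per (10)-(11).

        K_dB  : 10*log10(K) with K = (n+1)*A_R*P_t/(2*pi), treated as one fitted constant
        gamma : path loss exponent
        n     : Lambertian order from (4)
        w     : fixed lateral distance between the vehicle path and the PD (m); requires D > w
        """
        D_dB = 10.0 * math.log10(D)
        if far_approx:
            # far-scenario (log-linear) branch of (11): w^2/D^2 << 1
            return K_dB - gamma * D_dB
        # near-scenario branch: add G_dB = 5*(n+1)*log10(1 - w^2/D^2)
        G_dB = 5.0 * (n + 1) * math.log10(1.0 - (w / D) ** 2)
        return K_dB - gamma * D_dB + G_dB

    # Example with assumed values: n = 1, w = 2 m; K_dB, gamma near the night-time fit.
    for D in (3.0, 5.0, 10.0, 20.0):
        full = received_power_db(D, K_dB=-35.27, gamma=0.97, n=1, w=2.0)
        lin = received_power_db(D, K_dB=-35.27, gamma=0.97, n=1, w=2.0, far_approx=True)
        print(f"D = {D:4.1f} m   full: {full:7.2f} dB   log-linear: {lin:7.2f} dB")

The gap between the two outputs is exactly G_dB; with these assumed values it is a few dB at D ≈ 3 m and falls below about 0.2 dB beyond roughly 10 m, consistent with the linear region reported in Section IV.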
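Before any fitting, each raw ADC sample from the setup of Section II has to be mapped back to optical power via (2). A minimal sketch, assuming only the dB-domain form above (the function name and the example values are ours, not the authors'):

    import math

    def adc_voltage_to_power_dbw(v_out: float, amp_gain_db: float) -> float:
        """Received optical power in dBW from the PDA100A/ADC chain, per (2).

        v_out       : sampled output voltage in volts (0-5 V with the 50-ohm terminator)
        amp_gain_db : variable transimpedance-gain setting of the PDA100A (0-70 dB)
        """
        # [P_in]_dBW = 10*log10(V_out) - 0.5*G_amp - 21.76
        return 10.0 * math.log10(v_out) - 0.5 * amp_gain_db - 21.76

    # e.g. a 1.2 V sample taken with the amplifier at 40 dB:
    # adc_voltage_to_power_dbw(1.2, 40.0)  ->  about -41.0 dBW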
Fig. 3. Static channel model with the vehicle equipped with LED headlamps (night scenario measurements).

IV. MEASUREMENT RESULTS AND VERIFICATION

In this section, the data collected with the setting presented in Fig. 2 is presented for both static and dynamic channel modeling scenarios. For dynamic channel modeling, data for both night and daylight environments is presented.

A. Static Channel Modeling Scenario

In Fig. 3, the channel model is estimated by measuring the received power at several distance points at night and fitting the collected measurement values to find the channel model parameters that best fit the data. As shown in Fig. 3, the data is collected at multiple distances between 8 m and 12 m, with 200 samples averaged at each point (2 seconds of data at a 100 samples/s sampling rate). As the distance increases, the received power decreases at an exponential rate governed by γ. It is evident that measurements at a few discrete distance points do not provide a complete picture of how the channel model behaves over the different regions explained in Section III; however, one can observe that the linearity of the curve starts to degrade when D_dB < 10. Therefore, continuous measurements while the vehicle approaches the PD provide a better picture for realistic visible light channel modeling, which is discussed in the next subsection.

B. Dynamic Channel Modeling Scenario

Dynamic channel modeling is used to overcome the limitations discussed in the previous subsection. We first present how dynamic channel modeling is performed and the assumptions taken when performing it. Then, the data collected at night and in sunny daylight conditions are presented.

Fig. 4. Received power (dB) at night versus time (sec) with the vehicle equipped with normal headlamps.

TABLE II. Channel parameters estimated using dynamic and static channel modeling in night and daylight environments:
Night vehicle: K_dB = -35.2680 dB, γ = 0.9707
Sunny daylight vehicle: K_dB = -32.6335 dB, γ = 0.0175

The received light power at the PD is collected for a vehicle approaching the PD with a constant speed V of 20 mph (8.9408 m/s), as shown in Fig. 1. The data is collected for 10 seconds with a sampling rate of 100 samples/s (1000 samples in total). After identifying the range R at which the peak of the received power occurs (R_peak) and saving it as a reference, the peak of the saved data is located as shown in Fig. 4. Then the curve is flipped, and the power value corresponding to each distance point is identified using the timing information. Therefore, the range R of each data point is calculated as follows:

R_i = R_{peak} + V (T_{peak} - t_i),    (12)

where R_i is the range (horizontal distance between the vehicle and the PD), T_peak is the time stamp of the data point where the peak is measured, and t_i is the time stamp of each measured data point. Additional details of the channel modeling experimental setup are as follows:
• The vehicle is moving at a known constant speed V.
• The lateral distance of the vehicle from the PD, w, is also constant and known.
• The range R at which the peak power is received at the PD is known.
After transforming the time axis to a range (R) or actual distance (D) axis using (12), the channel model can be estimated using a linear fit of P_{r,dB} = K_dB − γ D_dB, as sketched below.
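A minimal sketch of this procedure (our illustration, not the authors' MATLAB processing; the array names, the linear-region cutoff, and the lateral offset and peak range used in the usage comment are assumptions):

    import numpy as np

    def fit_path_loss(t, p_r_db, v, w, r_peak, d_min=10.0):
        """Estimate (K_dB, gamma) from a single drive-by trace, following Section IV-B.

        t      : sample time stamps in seconds
        p_r_db : received power per sample, in dBW (already converted via (2))
        v      : constant vehicle speed in m/s
        w      : known lateral distance between the vehicle path and the PD, in m
        r_peak : known range (m) at which the received power peaks
        d_min  : keep only samples in the roughly log-linear region, D >= d_min
        """
        t = np.asarray(t, dtype=float)
        p_r_db = np.asarray(p_r_db, dtype=float)

        t_peak = t[np.argmax(p_r_db)]            # time stamp of the measured peak
        r = r_peak + v * (t_peak - t)            # range of every sample, eq. (12)
        d = np.sqrt(r**2 + w**2)                 # actual Tx-Rx distance D

        keep = (t <= t_peak) & (d >= d_min)      # approaching vehicle, linear region only
        d_db = 10.0 * np.log10(d[keep])
        slope, intercept = np.polyfit(d_db, p_r_db[keep], 1)   # P_r,dB = K_dB - gamma*D_dB
        return intercept, -slope                 # (K_dB, gamma)

    # Usage for one night-time run: v = 8.9408 m/s (20 mph), 100 samples/s for 10 s.
    # The w and r_peak below are placeholders, not the measured values of the paper:
    # K_dB, gamma = fit_path_loss(t, p_r_db, v=8.9408, w=2.0, r_peak=3.0)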
As shown in Fig. 5, the linear region of the channel model starts at ranges larger than about 10 meters. At shorter ranges the channel behavior is non-linear, which is due to the G_dB term in (11) (the condition w²/D² ≪ 1 is no longer satisfied). In addition, there is a clear difference between the received light power at night and in sunny daylight, as shown in Fig. 6: in the daylight scenario, the power received by the PD is higher and noisier because of the additional interference from sunlight.

Fig. 5. Received power (dBW) at night versus distance in the log domain (dB) for the different test cases, with the linear-region fit K_dB = −32.84 and γ = 1.173.

Fig. 6. Received power (dBW) in sunny daylight versus distance in the log domain (dB) for the different test cases, with the linear-region fit K_dB = −32.63 and γ = −0.01793.

V. CONCLUSIONS

This letter analyzed and validated a mathematically more tractable path loss channel model that can be used for vehicular sensing and communication applications. According to the developed model, when the incident angle of the light at the receiver was small, the received light power became linear with respect to the logarithmic distance between the transmitter and the receiver. The model was validated by a set of experimental measurements in both static and dynamic scenarios, and in both night and daytime settings. The proposed model can be utilized in visible light enabled sensing and communication applications where a reduction of the channel model complexity is needed.

REFERENCES
[1] D. Karunatilaka, F. Zafar, V. Kalavally, and R. Parthiban, "LED based indoor visible light communications: State of the art," IEEE Communications Surveys & Tutorials, vol. 17, no. 3, pp. 1649-1678, third quarter 2015.
[2] P. H. Pathak, X. Feng, P. Hu, and P. Mohapatra, "Visible light communication, networking, and sensing: A survey, potential and challenges," IEEE Communications Surveys & Tutorials, vol. 17, no. 4, pp. 2047-2077, 2015.
[3] S. Rajagopal, R. D. Roberts, and S. K. Lim, "IEEE 802.15.7 visible light communication: modulation schemes and dimming support," IEEE Communications Magazine, vol. 50, no. 3, pp. 72-82, March 2012.
[4] C. B. Liu, B. Sadeghi, and E. W. Knightly, "Enabling vehicular visible light communication (V2LC) networks," in Proceedings of the Eighth ACM International Workshop on Vehicular Inter-networking (VANET '11), New York, NY, USA: ACM, 2011, pp. 41-50. [Online]. Available: http://doi.acm.org/10.1145/2030698.2030705
[5] A. Cailean, B. Cagneau, L. Chassagne, S. Topsu, Y. Alayli, and J. Blosseville, "Visible light communications: Application to cooperation between vehicles and road infrastructures," in 2012 IEEE Intelligent Vehicles Symposium, June 2012, pp. 1055-1059.
[6] N. Lourenco, D. Terra, N. Kumar, L. N. Alves, and R. L. Aguiar, "Visible light communication system for outdoor applications," in 2012 8th International Symposium on Communication Systems, Networks and Digital Signal Processing (CSNDSP), July 2012, pp. 1-6.
[7] Y. Qiu, H.-H. Chen, and W.-X.
Meng, "Channel modeling for visible light communications: a survey," Wireless Communications and Mobile Computing, vol. 16, no. 14, pp. 2016-2034, 2016.
[8] H. Abuella, F. Miramirkhani, S. Ekin, M. Uysal, and S. Ahmed, "ViLDAR: visible light sensing-based speed estimation using vehicle headlamps," IEEE Transactions on Vehicular Technology, vol. 68, no. 11, pp. 10406-10417, 2019.
[9] K. Cui, G. Chen, Z. Xu, and R. D. Roberts, "Traffic light to vehicle visible light communication channel characterization," Applied Optics, vol. 51, no. 27, pp. 6594-6605, Sep. 2012.
[10] W. Viriyasitavat, S. Yu, and H. Tsai, "Short paper: Channel model for visible light communications using off-the-shelf scooter taillight," in 2013 IEEE Vehicular Networking Conference, Dec. 2013, pp. 170-173.
[11] B. Turan, G. Gurbilek, A. Uyrus, and S. C. Ergen, "Vehicular VLC frequency domain channel sounding and characterization," in 2018 IEEE Vehicular Networking Conference (VNC), Dec. 2018, pp. 1-8.
[12] L. Cheng, W. Viriyasitavat, M. Boban, and H. Tsai, "Comparison of radio frequency and visible light propagation channels for vehicular communications," IEEE Access, vol. 6, pp. 2634-2644, 2018.
[13] H. B. Eldeeb, F. Miramirkhani, and M. Uysal, "A path loss model for vehicle-to-vehicle visible light communications," in 2019 15th International Conference on Telecommunications (ConTEL), 2019, pp. 1-5.
[14] M. Elamassie, M. Karbalayghareh, F. Miramirkhani, R. C. Kizilirmak, and M. Uysal, "Effect of fog and rain on the performance of vehicular visible light communications," in 2018 IEEE 87th Vehicular Technology Conference (VTC Spring), 2018, pp. 1-6.
[15] A. Memedi, C. Tebruegge, J. Jahneke, and F. Dressler, "Impact of vehicle type and headlight characteristics on vehicular VLC performance," in 2018 IEEE Vehicular Networking Conference (VNC), Dec. 2018, pp. 1-8.
[16] "LASFIT (L1 9005) LED," Accessed: 2018. [Online]. Available: www.lasfit.com
[17] "Photo-detector (Thorlabs) PDA-100A," Accessed: 2018. [Online]. Available: www.thorlabs.com
[18] "Raspberry Pi (miniature computer)," Accessed: 2018. [Online]. Available: www.raspberrypi.org
[19] "Pi-Plates (DAQC2plate)," Accessed: 2018. [Online]. Available: www.pi-plates.com
[20] F. R. Gfeller and U. Bapst, "Wireless in-house data communication via diffuse infrared radiation," Proceedings of the IEEE, vol. 67, no. 11, pp. 1474-1486, Nov. 1979.
\ No newline at end of file diff --git a/zNAyT4oBgHgl3EQfn_iY/content/2301.00499v1.pdf b/zNAyT4oBgHgl3EQfn_iY/content/2301.00499v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..03c8edcd9080e13dd0292d5d149cfd31ca14a96a --- /dev/null +++ b/zNAyT4oBgHgl3EQfn_iY/content/2301.00499v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa99c680956bb252689366ea81bad059eeee60a83439aec9fabb26a7179b1bef +size 694925 diff --git a/zNAyT4oBgHgl3EQfn_iY/vector_store/index.faiss b/zNAyT4oBgHgl3EQfn_iY/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..d1a6c2157b5e4696a938a8914086e58a35c2d0ec --- /dev/null +++ b/zNAyT4oBgHgl3EQfn_iY/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1aed9bb0d8083a75608fb736ac64ff27c8ad92f3128e2db18db9ea241e60401e +size 4915245 diff --git a/zdAyT4oBgHgl3EQfO_bl/vector_store/index.faiss b/zdAyT4oBgHgl3EQfO_bl/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..a2694e839d2875dc4eaf0a5ba3cf795cf185fd8d --- /dev/null +++ b/zdAyT4oBgHgl3EQfO_bl/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aea051bc8ccf052fbc3b3767139aabfcf02a62204e1cfba2158e8ed10e828172 +size 2228269 diff --git a/ztAyT4oBgHgl3EQfn_j3/vector_store/index.faiss b/ztAyT4oBgHgl3EQfn_j3/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..4f2dc667cbf7f1e79620edae33f58fa88316d976 --- /dev/null +++ b/ztAyT4oBgHgl3EQfn_j3/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0072fd0a1e91dc261d10c0d7a128177b55e2a9c2c6a5a5325e9c0d97b2f1cbf9 +size 3342381